// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED     6
#define BACKREF_FOUND_NOT_SHARED 7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}
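
/*
 * Example (illustrative, not from the original source): for an
 * uncompressed, unencrypted file extent item with
 * btrfs_file_extent_offset() == 8K and num_bytes == 16K, only
 * ctx->extent_item_pos values in [8K, 24K) fall inside the referenced
 * range; any other position makes check_extent_in_eb() return 1,
 * meaning "skip this item".
 */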

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need
	 * the key. Thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
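
/*
 * Illustrative only: the usual way these trees are set up on the stack,
 * exactly as find_parent_nodes() does further below:
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 */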

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count > 0:
 *  - incremented when a ref->count transitions to > 0
 *  - decremented when a ref->count transitions to < 1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts the number of inodes that refer to an extent (different
	 * inodes in the same root or different roots) that we could find. The
	 * sharedness check typically stops once this counter gets greater than
	 * 1, so it may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent
	 * whose sharedness we are determining. In other words, how many file
	 * extent items we could find for our inode that point to our target
	 * data extent. The value we get here after finishing the extent
	 * sharedness check may be smaller than reality, but if it ends up
	 * being greater than 1, then we know for sure the inode has multiple
	 * file extent items that point to our data extent, and we can safely
	 * assume it's useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == sc->root->root_key.objectid &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}
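
/*
 * Worked example (illustrative): when prelim_ref_insert() merges a
 * delayed DROP ref (newref->count == -1) into an existing identical ref
 * with ref->count == 1, it calls update_share_count(sc, 1, 0, newref).
 * The "oldcount > 0 && newcount < 1" branch fires, share_count is
 * decremented, and the extent no longer appears shared through that ref.
 */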

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count, newref);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * Additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
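
/*
 * Example (illustrative): an inline BTRFS_TREE_BLOCK_REF_KEY backref
 * found by add_inline_refs() below is added with
 *
 *	add_indirect_ref(ctx->fs_info, preftrees, offset, NULL,
 *			 *info_level + 1, ctx->bytenr, 1, NULL, GFP_NOFS);
 *
 * and, because key == NULL, lands in preftrees->indirect_missing_keys
 * until add_missing_keys() reads the block and fills in a key.
 */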

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with:
	 * 1. slot == nritems
	 * 2. we are searching for a normal backref but the bytenr of this
	 *    leaf matches a shared data backref
	 * 3. the leaf owner is not equal to the root we are searching for
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR the leaf owner is not
		 * equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->ignore_extent_item_pos) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->ignore_extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * Resolve an indirect backref in the form (root_id, key, level)
 * to a logical address.
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're using search_commit_root we could possibly be holding locks
	 * on other tree nodes.  This happens when the qgroups code does
	 * backref walks when adding new delayed refs.  To deal with this we
	 * need to look in cache for the root, and if we don't find it then we
	 * need to search the tree_root's commit root, thus the
	 * btrfs_get_fs_root_commit_root usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
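
/*
 * Note: a ulist node's aux field is a plain u64. Callers in this file
 * store the head of an extent_inode_elem list in it (via
 * ulist_add_merge_ptr()) and read it back through the uintptr_t round
 * trip above.
 */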

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != sc->root->root_key.objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * Read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}
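
/*
 * Example (illustrative): if add_delayed_refs() runs with seq == 10 and
 * the head has a queued ref carrying node->seq == 12, that ref is skipped
 * by the "node->seq > seq" check above, so the walk reflects the extent's
 * reference state as of sequence number 10.
 */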

/*
 * Add all inline backrefs for bytenr to the list.
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
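
/*
 * Layout sketch (illustrative) of the extent item walked above:
 *
 *	[btrfs_extent_item]
 *	[btrfs_tree_block_info]   (only for non-skinny tree block items)
 *	[inline ref][inline ref]...   iterated until ptr reaches the end
 *				      of the item
 */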

/*
 * Add all non-inline backrefs for bytenr to the list.
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's
 * last snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	struct btrfs_backref_shared_cache_entry *entry;

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(root->fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not
	 * shared because we have just switched from one leaf to another, and
	 * switched also other nodes above the leaf and below the current
	 * level, so mark them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's
 * last snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(root->fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct: the refcount of
	 * the data extent is increased in the extent item in the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}
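
/*
 * Sketch (illustrative) of the lookup/store protocol these two helpers
 * implement for btrfs_is_data_extent_shared():
 *
 *	bool is_shared, cached;
 *
 *	cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
 *					     &is_shared);
 *	if (!cached) {
 *		is_shared = <result of a full backref walk>;
 *		store_backref_shared_cache(ctx, root, bytenr, level,
 *					   is_shared);
 *	}
 */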

/*
 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * @ctx:     Backref walking context object, must not be NULL.
 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
 *           shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
			     struct share_check *sc)
{
	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	/* Roots ulist is not needed when using a sharedness check context. */
	if (sc)
		ASSERT(ctx->roots == NULL);

	key.objectid = ctx->bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!ctx->trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (ctx->time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* This shouldn't happen, indicates a bug or fs corruption. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
	    ctx->time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		 * time.
		 */
		delayed_refs = &ctx->trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == ctx->bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(ctx, path, &info_level,
					      &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(ctx, root, path, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	/*
	 * If we have a share context and we reached here, it means the extent
	 * is not directly shared (no multiple reference items for it),
	 * otherwise we would have exited earlier with a return value of
	 * BACKREF_FOUND_SHARED after processing delayed references or while
	 * processing inline or keyed references from the extent tree.
	 * The extent may however be indirectly shared through shared subtrees
	 * as a result from creating snapshots, so we determine below what is
	 * its parent node, in case we are dealing with a metadata extent, or
	 * what's the leaf (or leaves), from a fs tree, that has a file extent
	 * item pointing to it in case we are dealing with a data extent.
	 */
	ASSERT(extent_is_shared(sc) == 0);

	/*
	 * If we are here for a data extent and we have a share_check structure
	 * it means the data extent is not directly shared (does not have
	 * multiple reference items), so we have to check if a path in the fs
	 * tree (going from the root node down to the leaf that has the file
	 * extent item pointing to the data extent) is shared, that is, if any
	 * of the extent buffers in the path is referenced by other trees.
	 */
	if (sc && ctx->bytenr == sc->data_bytenr) {
		/*
		 * If our data extent is from a generation more recent than the
		 * last generation used to snapshot the root, then we know that
		 * it can not be shared through subtrees, so we can skip
		 * resolving indirect references, there's no point in
		 * determining the extent buffers for the path from the fs tree
		 * root node down to the leaf that has the file extent item that
		 * points to the data extent.
		 */
		if (sc->data_extent_gen >
		    btrfs_root_last_snapshot(&sc->root->root_item)) {
			ret = BACKREF_FOUND_NOT_SHARED;
			goto out;
		}

		/*
		 * If we are only determining if a data extent is shared or not
		 * and the corresponding file extent item is located in the same
		 * leaf as the previous file extent item, we can skip resolving
		 * indirect references for a data extent, since the fs tree path
		 * is the same (same leaf, so same path). We skip as long as the
		 * cached result for the leaf is valid and only if there's only
		 * one file extent item pointing to the data extent, because in
		 * the case of multiple file extent items, they may be located
		 * in different leaves and therefore we have multiple paths.
		 */
		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
		    sc->self_ref_count == 1) {
			bool cached;
			bool is_shared;

			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
						     sc->ctx->curr_leaf_bytenr,
						     0, &is_shared);
			if (cached) {
				if (is_shared)
					ret = BACKREF_FOUND_SHARED;
				else
					ret = BACKREF_FOUND_NOT_SHARED;
				goto out;
			}
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (!ctx->ignore_extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct btrfs_tree_parent_check check = { 0 };
				struct extent_buffer *eb;

				check.level = ref->level;

				eb = read_tree_block(ctx->fs_info, ref->parent,
						     &check);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(ctx, eb, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && !ctx->ignore_extent_item_pos) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
 */
int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
	int ret;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	ret = find_parent_nodes(ctx, NULL);
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
	    (ret < 0 && ret != -ENOENT)) {
		free_leaf_list(ctx->refs);
		ctx->refs = NULL;
		return ret;
	}

	return 0;
}
1715
1716/*
1717 * Walk all backrefs for a given extent to find all roots that reference this
1718 * extent. Walking a backref means finding all extents that reference this
1719 * extent and in turn walk the backrefs of those, too. Naturally this is a
1720 * recursive process, but here it is implemented in an iterative fashion: We
1721 * find all referencing extents for the extent in question and put them on a
1722 * list. In turn, we find all referencing extents for those, further appending
1723 * to the list. The way we iterate the list allows adding more elements after
1724 * the current while iterating. The process stops when we reach the end of the
1725 * list.
1726 *
1727 * Found roots are added to @ctx->roots, which is allocated by this function if
1728 * it points to NULL, in which case the caller is responsible for freeing it
1729 * after it's not needed anymore.
1730 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1731 * ulist to do temporary work, and frees it before returning.
1732 *
1733 * Returns 0 on success, < 0 on error.
1734 */
1735static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1736{
1737	const u64 orig_bytenr = ctx->bytenr;
1738	const bool orig_ignore_extent_item_pos = ctx->ignore_extent_item_pos;
1739	bool roots_ulist_allocated = false;
1740	struct ulist_iterator uiter;
1741	int ret = 0;
1742
1743	ASSERT(ctx->refs == NULL);
1744
1745	ctx->refs = ulist_alloc(GFP_NOFS);
1746	if (!ctx->refs)
1747		return -ENOMEM;
1748
1749	if (!ctx->roots) {
1750		ctx->roots = ulist_alloc(GFP_NOFS);
1751		if (!ctx->roots) {
1752			ulist_free(ctx->refs);
1753			ctx->refs = NULL;
1754			return -ENOMEM;
1755		}
1756		roots_ulist_allocated = true;
1757	}
1758
1759	ctx->ignore_extent_item_pos = true;
1760
1761	ULIST_ITER_INIT(&uiter);
1762	while (1) {
1763		struct ulist_node *node;
1764
1765		ret = find_parent_nodes(ctx, NULL);
1766		if (ret < 0 && ret != -ENOENT) {
1767			if (roots_ulist_allocated) {
1768				ulist_free(ctx->roots);
1769				ctx->roots = NULL;
1770			}
1771			break;
1772		}
1773		ret = 0;
1774		node = ulist_next(ctx->refs, &uiter);
1775		if (!node)
1776			break;
1777		ctx->bytenr = node->val;
1778		cond_resched();
1779	}
1780
1781	ulist_free(ctx->refs);
1782	ctx->refs = NULL;
1783	ctx->bytenr = orig_bytenr;
1784	ctx->ignore_extent_item_pos = orig_ignore_extent_item_pos;
1785
1786	return ret;
1787}
1788
1789int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1790			 bool skip_commit_root_sem)
1791{
1792	int ret;
1793
1794	if (!ctx->trans && !skip_commit_root_sem)
1795		down_read(&ctx->fs_info->commit_root_sem);
1796	ret = btrfs_find_all_roots_safe(ctx);
1797	if (!ctx->trans && !skip_commit_root_sem)
1798		up_read(&ctx->fs_info->commit_root_sem);
1799	return ret;
1800}
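
Editor's sketch (not part of the kernel source): resolving the set of roots
that reference an extent. example_find_roots is a hypothetical caller; with no
transaction handle and skip_commit_root_sem == false, commit_root_sem is taken
internally as shown above.

	static int example_find_roots(struct btrfs_fs_info *fs_info, u64 bytenr)
	{
		struct btrfs_backref_walk_ctx walk_ctx = { 0 };
		struct ulist_iterator uiter;
		struct ulist_node *node;
		int ret;

		walk_ctx.bytenr = bytenr;
		walk_ctx.fs_info = fs_info;

		ret = btrfs_find_all_roots(&walk_ctx, false);
		if (ret)
			return ret;

		/* ctx->roots was NULL, so it was allocated for us; free it */
		ULIST_ITER_INIT(&uiter);
		while ((node = ulist_next(walk_ctx.roots, &uiter)))
			pr_info("root %llu references extent %llu\n",
				node->val, bytenr);
		ulist_free(walk_ctx.roots);
		return 0;
	}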
1801
1802struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1803{
1804	struct btrfs_backref_share_check_ctx *ctx;
1805
1806	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1807	if (!ctx)
1808		return NULL;
1809
1810	ulist_init(&ctx->refs);
1811
1812	return ctx;
1813}
1814
1815void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1816{
1817	if (!ctx)
1818		return;
1819
1820	ulist_release(&ctx->refs);
1821	kfree(ctx);
1822}
1823
1824/*
1825 * Check if a data extent is shared or not.
1826 *
1827 * @inode:       The inode whose extent we are checking.
1828 * @bytenr:      Logical bytenr of the extent we are checking.
1829 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
1830 *               not known.
1831 * @ctx:         A backref sharedness check context.
1832 *
1833 * btrfs_is_data_extent_shared uses the backref walking code but will short
1834 * circuit as soon as it finds a root or inode that doesn't match the
1835 * one passed in. This provides a significant performance benefit for
1836 * callers (such as fiemap) which want to know whether the extent is
1837 * shared but do not need a ref count.
1838 *
1839 * This attempts to attach to the running transaction in order to account for
1840 * delayed refs, but continues on even when no running transaction exists.
1841 *
1842 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1843 */
1844int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1845				u64 extent_gen,
1846				struct btrfs_backref_share_check_ctx *ctx)
1847{
1848	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1849	struct btrfs_root *root = inode->root;
1850	struct btrfs_fs_info *fs_info = root->fs_info;
1851	struct btrfs_trans_handle *trans;
1852	struct ulist_iterator uiter;
1853	struct ulist_node *node;
1854	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1855	int ret = 0;
1856	struct share_check shared = {
1857		.ctx = ctx,
1858		.root = root,
1859		.inum = btrfs_ino(inode),
1860		.data_bytenr = bytenr,
1861		.data_extent_gen = extent_gen,
1862		.share_count = 0,
1863		.self_ref_count = 0,
1864		.have_delayed_delete_refs = false,
1865	};
1866	int level;
1867
1868	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1869		if (ctx->prev_extents_cache[i].bytenr == bytenr)
1870			return ctx->prev_extents_cache[i].is_shared;
1871	}
1872
1873	ulist_init(&ctx->refs);
1874
1875	trans = btrfs_join_transaction_nostart(root);
1876	if (IS_ERR(trans)) {
1877		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1878			ret = PTR_ERR(trans);
1879			goto out;
1880		}
1881		trans = NULL;
1882		down_read(&fs_info->commit_root_sem);
1883	} else {
1884		btrfs_get_tree_mod_seq(fs_info, &elem);
1885		walk_ctx.time_seq = elem.seq;
1886	}
1887
1888	walk_ctx.ignore_extent_item_pos = true;
1889	walk_ctx.trans = trans;
1890	walk_ctx.fs_info = fs_info;
1891	walk_ctx.refs = &ctx->refs;
1892
1893	/* -1 means we are at the bytenr of the data extent. */
1894	level = -1;
1895	ULIST_ITER_INIT(&uiter);
1896	ctx->use_path_cache = true;
1897	while (1) {
1898		bool is_shared;
1899		bool cached;
1900
1901		walk_ctx.bytenr = bytenr;
1902		ret = find_parent_nodes(&walk_ctx, &shared);
1903		if (ret == BACKREF_FOUND_SHARED ||
1904		    ret == BACKREF_FOUND_NOT_SHARED) {
1905			/* If shared must return 1, otherwise return 0. */
1906			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1907			if (level >= 0)
1908				store_backref_shared_cache(ctx, root, bytenr,
1909							   level, ret == 1);
1910			break;
1911		}
1912		if (ret < 0 && ret != -ENOENT)
1913			break;
1914		ret = 0;
1915
1916		/*
1917		 * If our data extent was not directly shared (without multiple
1918		 * reference items), then it might have a single reference item
1919		 * with a count > 1 for the same offset, which means there are 2
1920		 * (or more) file extent items that point to the data extent -
1921		 * this happens when a file extent item needs to be split and
1922		 * then one item gets moved to another leaf due to a b+tree leaf
1923		 * split when inserting some item. In this case the file extent
1924		 * items may be located in different leaves and therefore some
1925		 * of the leaves may be referenced through shared subtrees while
1926		 * others are not. Since our extent buffer cache only works for
1927		 * a single path (by far the most common case and simpler to
1928		 * deal with), we cannot use it if we have multiple leaves
1929		 * (which implies multiple paths).
1930		 */
1931		if (level == -1 && ctx->refs.nnodes > 1)
1932			ctx->use_path_cache = false;
1933
1934		if (level >= 0)
1935			store_backref_shared_cache(ctx, root, bytenr,
1936						   level, false);
1937		node = ulist_next(&ctx->refs, &uiter);
1938		if (!node)
1939			break;
1940		bytenr = node->val;
1941		level++;
1942		cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
1943						     &is_shared);
1944		if (cached) {
1945			ret = (is_shared ? 1 : 0);
1946			break;
1947		}
1948		shared.share_count = 0;
1949		shared.have_delayed_delete_refs = false;
1950		cond_resched();
1951	}
1952
1953	/*
1954	 * Cache the sharedness result for the data extent if we know our inode
1955	 * has more than 1 file extent item that refers to the data extent.
1956	 */
1957	if (ret >= 0 && shared.self_ref_count > 1) {
1958		int slot = ctx->prev_extents_cache_slot;
1959
1960		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
1961		ctx->prev_extents_cache[slot].is_shared = (ret == 1);
1962
1963		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
1964		ctx->prev_extents_cache_slot = slot;
1965	}
1966
1967	if (trans) {
1968		btrfs_put_tree_mod_seq(fs_info, &elem);
1969		btrfs_end_transaction(trans);
1970	} else {
1971		up_read(&fs_info->commit_root_sem);
1972	}
1973out:
1974	ulist_release(&ctx->refs);
1975	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
1976
1977	return ret;
1978}
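
Editor's sketch (not part of the kernel source): the fiemap-style usage
pattern for the sharedness check above. example_check_shared is hypothetical;
the context should be allocated once and reused across extents of the same
inode so the path and previous-extents caches can take effect.

	static int example_check_shared(struct btrfs_inode *inode, u64 bytenr,
					u64 extent_gen)
	{
		struct btrfs_backref_share_check_ctx *ctx;
		int ret;

		ctx = btrfs_alloc_backref_share_check_ctx();
		if (!ctx)
			return -ENOMEM;

		ret = btrfs_is_data_extent_shared(inode, bytenr, extent_gen, ctx);
		if (ret == 1)
			pr_info("extent %llu is shared\n", bytenr);
		else if (ret == 0)
			pr_info("extent %llu is not shared\n", bytenr);

		btrfs_free_backref_share_ctx(ctx);
		return ret < 0 ? ret : 0;
	}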
1979
1980int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1981			  u64 start_off, struct btrfs_path *path,
1982			  struct btrfs_inode_extref **ret_extref,
1983			  u64 *found_off)
1984{
1985	int ret, slot;
1986	struct btrfs_key key;
1987	struct btrfs_key found_key;
1988	struct btrfs_inode_extref *extref;
1989	const struct extent_buffer *leaf;
1990	unsigned long ptr;
1991
1992	key.objectid = inode_objectid;
1993	key.type = BTRFS_INODE_EXTREF_KEY;
1994	key.offset = start_off;
1995
1996	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1997	if (ret < 0)
1998		return ret;
1999
2000	while (1) {
2001		leaf = path->nodes[0];
2002		slot = path->slots[0];
2003		if (slot >= btrfs_header_nritems(leaf)) {
2004			/*
2005			 * If the item at offset is not found,
2006			 * btrfs_search_slot will point us to the slot
2007			 * where it should be inserted. In our case
2008			 * that will be the slot directly before the
2009			 * next INODE_EXTREF_KEY item. In the case
2010			 * that we're pointing to the last slot in a
2011			 * leaf, we must move one leaf over.
2012			 */
2013			ret = btrfs_next_leaf(root, path);
2014			if (ret) {
2015				if (ret >= 1)
2016					ret = -ENOENT;
2017				break;
2018			}
2019			continue;
2020		}
2021
2022		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2023
2024		/*
2025		 * Check that we're still looking at an extended ref key for
2026		 * this particular objectid. If we have a different
2027		 * objectid or type then there are no more to be found
2028		 * in the tree and we can exit.
2029		 */
2030		ret = -ENOENT;
2031		if (found_key.objectid != inode_objectid)
2032			break;
2033		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2034			break;
2035
2036		ret = 0;
2037		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2038		extref = (struct btrfs_inode_extref *)ptr;
2039		*ret_extref = extref;
2040		if (found_off)
2041			*found_off = found_key.offset;
2042		break;
2043	}
2044
2045	return ret;
2046}
2047
2048/*
2049 * this iterates to turn a name (from iref/extref) into a full filesystem path.
2050 * Elements of the path are separated by '/' and the path is guaranteed to be
2051 * 0-terminated. the path is only given within the current file system.
2052 * Therefore, it never starts with a '/'. the caller is responsible for
2053 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
2054 * finally, the start point of the resulting string is returned. this pointer
2055 * is normally within dest.
2056 * in case the path buffer would overflow, the pointer is decremented further
2057 * as if output was written to the buffer, though no more output is actually
2058 * generated. that way, the caller can determine how much space would be
2059 * required for the path to fit into the buffer. in that case, the returned
2060 * value will be smaller than dest. callers must check this!
2061 */
2062char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2063			u32 name_len, unsigned long name_off,
2064			struct extent_buffer *eb_in, u64 parent,
2065			char *dest, u32 size)
2066{
2067	int slot;
2068	u64 next_inum;
2069	int ret;
2070	s64 bytes_left = ((s64)size) - 1;
2071	struct extent_buffer *eb = eb_in;
2072	struct btrfs_key found_key;
2073	struct btrfs_inode_ref *iref;
2074
2075	if (bytes_left >= 0)
2076		dest[bytes_left] = '\0';
2077
2078	while (1) {
2079		bytes_left -= name_len;
2080		if (bytes_left >= 0)
2081			read_extent_buffer(eb, dest + bytes_left,
2082					   name_off, name_len);
2083		if (eb != eb_in) {
2084			if (!path->skip_locking)
2085				btrfs_tree_read_unlock(eb);
2086			free_extent_buffer(eb);
2087		}
2088		ret = btrfs_find_item(fs_root, path, parent, 0,
2089				BTRFS_INODE_REF_KEY, &found_key);
2090		if (ret > 0)
2091			ret = -ENOENT;
2092		if (ret)
2093			break;
2094
2095		next_inum = found_key.offset;
2096
2097		/* regular exit ahead */
2098		if (parent == next_inum)
2099			break;
2100
2101		slot = path->slots[0];
2102		eb = path->nodes[0];
2103		/* make sure we can use eb after releasing the path */
2104		if (eb != eb_in) {
2105			path->nodes[0] = NULL;
2106			path->locks[0] = 0;
2107		}
2108		btrfs_release_path(path);
2109		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2110
2111		name_len = btrfs_inode_ref_name_len(eb, iref);
2112		name_off = (unsigned long)(iref + 1);
2113
2114		parent = next_inum;
2115		--bytes_left;
2116		if (bytes_left >= 0)
2117			dest[bytes_left] = '/';
2118	}
2119
2120	btrfs_release_path(path);
2121
2122	if (ret)
2123		return ERR_PTR(ret);
2124
2125	return dest + bytes_left;
2126}
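
Editor's sketch (not part of the kernel source): checking the backwards-filled
buffer contract described above. example_ref_to_path is hypothetical and
assumes name_len/name_off were taken from an inode ref in eb by the caller.

	static int example_ref_to_path(struct btrfs_root *fs_root,
				       struct btrfs_path *path, u32 name_len,
				       unsigned long name_off,
				       struct extent_buffer *eb, u64 parent)
	{
		char buf[256];
		char *p;

		p = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb,
				      parent, buf, sizeof(buf));
		if (IS_ERR(p))
			return PTR_ERR(p);
		/* p below buf means overflow: buf - p more bytes were needed */
		if (p < buf)
			return -ENAMETOOLONG;
		pr_info("resolved path: %s\n", p);
		return 0;
	}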
2127
2128/*
2129 * this makes the path point to (logical EXTENT_ITEM *)
2130 * on success, returns 0 and sets *flags_ret to BTRFS_EXTENT_FLAG_DATA for
2131 * data or to BTRFS_EXTENT_FLAG_TREE_BLOCK for tree blocks; returns <0 on error.
2132 */
2133int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2134			struct btrfs_path *path, struct btrfs_key *found_key,
2135			u64 *flags_ret)
2136{
2137	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2138	int ret;
2139	u64 flags;
2140	u64 size = 0;
2141	u32 item_size;
2142	const struct extent_buffer *eb;
2143	struct btrfs_extent_item *ei;
2144	struct btrfs_key key;
2145
2146	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2147		key.type = BTRFS_METADATA_ITEM_KEY;
2148	else
2149		key.type = BTRFS_EXTENT_ITEM_KEY;
2150	key.objectid = logical;
2151	key.offset = (u64)-1;
2152
2153	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2154	if (ret < 0)
2155		return ret;
2156
2157	ret = btrfs_previous_extent_item(extent_root, path, 0);
2158	if (ret) {
2159		if (ret > 0)
2160			ret = -ENOENT;
2161		return ret;
2162	}
2163	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2164	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2165		size = fs_info->nodesize;
2166	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2167		size = found_key->offset;
2168
2169	if (found_key->objectid > logical ||
2170	    found_key->objectid + size <= logical) {
2171		btrfs_debug(fs_info,
2172			"logical %llu is not within any extent", logical);
2173		return -ENOENT;
2174	}
2175
2176	eb = path->nodes[0];
2177	item_size = btrfs_item_size(eb, path->slots[0]);
2178	BUG_ON(item_size < sizeof(*ei));
2179
2180	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2181	flags = btrfs_extent_flags(eb, ei);
2182
2183	btrfs_debug(fs_info,
2184		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2185		 logical, logical - found_key->objectid, found_key->objectid,
2186		 found_key->offset, flags, item_size);
2187
2188	WARN_ON(!flags_ret);
2189	if (flags_ret) {
2190		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2191			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2192		else if (flags & BTRFS_EXTENT_FLAG_DATA)
2193			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
2194		else
2195			BUG();
2196		return 0;
2197	}
2198
2199	return -EIO;
2200}
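
Editor's sketch (not part of the kernel source): a hypothetical wrapper
showing the calling convention of extent_from_logical(), i.e. 0 on success
with the extent type reported through *flags_ret.

	static int example_extent_lookup(struct btrfs_fs_info *fs_info,
					 u64 logical)
	{
		struct btrfs_path *path;
		struct btrfs_key found_key;
		u64 flags = 0;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = extent_from_logical(fs_info, logical, path, &found_key,
					  &flags);
		if (!ret)
			pr_info("extent %llu is a %s extent\n",
				found_key.objectid,
				(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) ?
				"tree block" : "data");
		btrfs_free_path(path);
		return ret;
	}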
2201
2202/*
2203 * helper function to iterate extent inline refs. ptr must point to a 0 value
2204 * for the first call and may be modified. it is used to track state.
2205 * if more refs exist, 0 is returned and the next call to
2206 * get_extent_inline_ref must pass the modified ptr parameter to get the
2207 * next ref. after the last ref was processed, 1 is returned.
2208 * returns <0 on error
2209 */
2210static int get_extent_inline_ref(unsigned long *ptr,
2211				 const struct extent_buffer *eb,
2212				 const struct btrfs_key *key,
2213				 const struct btrfs_extent_item *ei,
2214				 u32 item_size,
2215				 struct btrfs_extent_inline_ref **out_eiref,
2216				 int *out_type)
2217{
2218	unsigned long end;
2219	u64 flags;
2220	struct btrfs_tree_block_info *info;
2221
2222	if (!*ptr) {
2223		/* first call */
2224		flags = btrfs_extent_flags(eb, ei);
2225		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2226			if (key->type == BTRFS_METADATA_ITEM_KEY) {
2227				/* a skinny metadata extent */
2228				*out_eiref =
2229				     (struct btrfs_extent_inline_ref *)(ei + 1);
2230			} else {
2231				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2232				info = (struct btrfs_tree_block_info *)(ei + 1);
2233				*out_eiref =
2234				   (struct btrfs_extent_inline_ref *)(info + 1);
2235			}
2236		} else {
2237			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2238		}
2239		*ptr = (unsigned long)*out_eiref;
2240		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2241			return -ENOENT;
2242	}
2243
2244	end = (unsigned long)ei + item_size;
2245	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2246	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2247						     BTRFS_REF_TYPE_ANY);
2248	if (*out_type == BTRFS_REF_TYPE_INVALID)
2249		return -EUCLEAN;
2250
2251	*ptr += btrfs_extent_inline_ref_size(*out_type);
2252	WARN_ON(*ptr > end);
2253	if (*ptr == end)
2254		return 1; /* last */
2255
2256	return 0;
2257}
2258
2259/*
2260 * reads the tree block backref for an extent. tree level and root are returned
2261 * through out_level and out_root. ptr must point to a 0 value for the first
2262 * call and may be modified (see get_extent_inline_ref comment).
2263 * returns 0 if data was provided, 1 if there was no more data to provide or
2264 * <0 on error.
2265 */
2266int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2267			    struct btrfs_key *key, struct btrfs_extent_item *ei,
2268			    u32 item_size, u64 *out_root, u8 *out_level)
2269{
2270	int ret;
2271	int type;
2272	struct btrfs_extent_inline_ref *eiref;
2273
2274	if (*ptr == (unsigned long)-1)
2275		return 1;
2276
2277	while (1) {
2278		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2279					      &eiref, &type);
2280		if (ret < 0)
2281			return ret;
2282
2283		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2284		    type == BTRFS_SHARED_BLOCK_REF_KEY)
2285			break;
2286
2287		if (ret == 1)
2288			return 1;
2289	}
2290
2291	/* we can treat both ref types equally here */
2292	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2293
2294	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2295		struct btrfs_tree_block_info *info;
2296
2297		info = (struct btrfs_tree_block_info *)(ei + 1);
2298		*out_level = btrfs_tree_block_level(eb, info);
2299	} else {
2300		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2301		*out_level = (u8)key->offset;
2302	}
2303
2304	if (ret == 1)
2305		*ptr = (unsigned long)-1;
2306
2307	return 0;
2308}
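
Editor's sketch (not part of the kernel source): the intended iteration
pattern for tree_backref_for_extent(), with ptr starting at 0 as the state
cookie. The inputs are assumed to come from extent_from_logical() plus
btrfs_item_ptr() on the resulting path; example_tree_backrefs is hypothetical.

	static int example_tree_backrefs(struct extent_buffer *eb,
					 struct btrfs_key *key,
					 struct btrfs_extent_item *ei,
					 u32 item_size)
	{
		unsigned long ptr = 0;
		u64 root;
		u8 level;
		int ret;

		while (1) {
			ret = tree_backref_for_extent(&ptr, eb, key, ei,
						      item_size, &root, &level);
			if (ret < 0)
				return ret;
			if (ret > 0)
				break;	/* no more tree backrefs */
			pr_info("referenced by root %llu at level %u\n",
				root, level);
		}
		return 0;
	}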
2309
2310static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2311			     struct extent_inode_elem *inode_list,
2312			     u64 root, u64 extent_item_objectid,
2313			     iterate_extent_inodes_t *iterate, void *ctx)
2314{
2315	struct extent_inode_elem *eie;
2316	int ret = 0;
2317
2318	for (eie = inode_list; eie; eie = eie->next) {
2319		btrfs_debug(fs_info,
2320			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2321			    extent_item_objectid, eie->inum,
2322			    eie->offset, root);
2323		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2324		if (ret) {
2325			btrfs_debug(fs_info,
2326				    "stopping iteration for %llu due to ret=%d",
2327				    extent_item_objectid, ret);
2328			break;
2329		}
2330	}
2331
2332	return ret;
2333}
2334
2335/*
2336 * calls iterate() for every inode that references the extent identified by
2337 * the given parameters.
2338 * when the iterator function returns a non-zero value, iteration stops.
2339 */
2340int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2341			  bool search_commit_root,
2342			  iterate_extent_inodes_t *iterate, void *user_ctx)
2343{
2344	int ret;
2345	struct ulist *refs;
2346	struct ulist_node *ref_node;
2347	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2348	struct ulist_iterator ref_uiter;
2349
2350	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2351		    ctx->bytenr);
2352
2353	ASSERT(ctx->trans == NULL);
2354	ASSERT(ctx->roots == NULL);
2355
2356	if (!search_commit_root) {
2357		struct btrfs_trans_handle *trans;
2358
2359		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2360		if (IS_ERR(trans)) {
2361			if (PTR_ERR(trans) != -ENOENT &&
2362			    PTR_ERR(trans) != -EROFS)
2363				return PTR_ERR(trans);
2364			trans = NULL;
2365		}
2366		ctx->trans = trans;
2367	}
2368
2369	if (ctx->trans) {
2370		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2371		ctx->time_seq = seq_elem.seq;
2372	} else {
2373		down_read(&ctx->fs_info->commit_root_sem);
2374	}
2375
2376	ret = btrfs_find_all_leafs(ctx);
2377	if (ret)
2378		goto out;
2379	refs = ctx->refs;
2380	ctx->refs = NULL;
2381
2382	ULIST_ITER_INIT(&ref_uiter);
2383	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2384		const u64 leaf_bytenr = ref_node->val;
2385		struct ulist_node *root_node;
2386		struct ulist_iterator root_uiter;
2387		struct extent_inode_elem *inode_list;
2388
2389		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2390
2391		if (ctx->cache_lookup) {
2392			const u64 *root_ids;
2393			int root_count;
2394			bool cached;
2395
2396			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2397						   &root_ids, &root_count);
2398			if (cached) {
2399				for (int i = 0; i < root_count; i++) {
2400					ret = iterate_leaf_refs(ctx->fs_info,
2401								inode_list,
2402								root_ids[i],
2403								leaf_bytenr,
2404								iterate,
2405								user_ctx);
2406					if (ret)
2407						break;
2408				}
2409				continue;
2410			}
2411		}
2412
2413		if (!ctx->roots) {
2414			ctx->roots = ulist_alloc(GFP_NOFS);
2415			if (!ctx->roots) {
2416				ret = -ENOMEM;
2417				break;
2418			}
2419		}
2420
2421		ctx->bytenr = leaf_bytenr;
2422		ret = btrfs_find_all_roots_safe(ctx);
2423		if (ret)
2424			break;
2425
2426		if (ctx->cache_store)
2427			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2428
2429		ULIST_ITER_INIT(&root_uiter);
2430		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2431			btrfs_debug(ctx->fs_info,
2432				    "root %llu references leaf %llu, data list %#llx",
2433				    root_node->val, ref_node->val,
2434				    ref_node->aux);
2435			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2436						root_node->val, ctx->bytenr,
2437						iterate, user_ctx);
2438		}
2439		ulist_reinit(ctx->roots);
2440	}
2441
2442	free_leaf_list(refs);
2443out:
2444	if (ctx->trans) {
2445		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2446		btrfs_end_transaction(ctx->trans);
2447		ctx->trans = NULL;
2448	} else {
2449		up_read(&ctx->fs_info->commit_root_sem);
2450	}
2451
2452	ulist_free(ctx->roots);
2453	ctx->roots = NULL;
2454
2455	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2456		ret = 0;
2457
2458	return ret;
2459}
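
Editor's sketch (not part of the kernel source): wiring a callback into
iterate_extent_inodes(). Both functions are hypothetical; returning a
non-zero value (e.g. BTRFS_ITERATE_EXTENT_INODES_STOP) from the callback
stops the iteration, as documented above.

	static int example_inode_cb(u64 inum, u64 offset, u64 num_bytes,
				    u64 root, void *user_ctx)
	{
		pr_info("inode %llu offset %llu root %llu\n",
			inum, offset, root);
		return 0;
	}

	static int example_iterate(struct btrfs_fs_info *fs_info, u64 bytenr,
				   u64 extent_item_pos)
	{
		struct btrfs_backref_walk_ctx walk_ctx = { 0 };

		walk_ctx.bytenr = bytenr;
		walk_ctx.extent_item_pos = extent_item_pos;
		walk_ctx.fs_info = fs_info;

		return iterate_extent_inodes(&walk_ctx, false, example_inode_cb,
					     NULL);
	}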
2460
2461static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2462{
2463	struct btrfs_data_container *inodes = ctx;
2464	const size_t c = 3 * sizeof(u64);
2465
2466	if (inodes->bytes_left >= c) {
2467		inodes->bytes_left -= c;
2468		inodes->val[inodes->elem_cnt] = inum;
2469		inodes->val[inodes->elem_cnt + 1] = offset;
2470		inodes->val[inodes->elem_cnt + 2] = root;
2471		inodes->elem_cnt += 3;
2472	} else {
2473		inodes->bytes_missing += c - inodes->bytes_left;
2474		inodes->bytes_left = 0;
2475		inodes->elem_missed += 3;
2476	}
2477
2478	return 0;
2479}
2480
2481int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2482				struct btrfs_path *path,
2483				void *ctx, bool ignore_offset)
2484{
2485	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2486	int ret;
2487	u64 flags = 0;
2488	struct btrfs_key found_key;
2489	int search_commit_root = path->search_commit_root;
2490
2491	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2492	btrfs_release_path(path);
2493	if (ret < 0)
2494		return ret;
2495	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2496		return -EINVAL;
2497
2498	walk_ctx.bytenr = found_key.objectid;
2499	if (ignore_offset)
2500		walk_ctx.ignore_extent_item_pos = true;
2501	else
2502		walk_ctx.extent_item_pos = logical - found_key.objectid;
2503	walk_ctx.fs_info = fs_info;
2504
2505	return iterate_extent_inodes(&walk_ctx, search_commit_root,
2506				     build_ino_list, ctx);
2507}
2508
2509static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2510			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2511
2512static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2513{
2514	int ret = 0;
2515	int slot;
2516	u32 cur;
2517	u32 len;
2518	u32 name_len;
2519	u64 parent = 0;
2520	int found = 0;
2521	struct btrfs_root *fs_root = ipath->fs_root;
2522	struct btrfs_path *path = ipath->btrfs_path;
2523	struct extent_buffer *eb;
2524	struct btrfs_inode_ref *iref;
2525	struct btrfs_key found_key;
2526
2527	while (!ret) {
2528		ret = btrfs_find_item(fs_root, path, inum,
2529				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2530				&found_key);
2531
2532		if (ret < 0)
2533			break;
2534		if (ret) {
2535			ret = found ? 0 : -ENOENT;
2536			break;
2537		}
2538		++found;
2539
2540		parent = found_key.offset;
2541		slot = path->slots[0];
2542		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2543		if (!eb) {
2544			ret = -ENOMEM;
2545			break;
2546		}
2547		btrfs_release_path(path);
2548
2549		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2550
2551		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2552			name_len = btrfs_inode_ref_name_len(eb, iref);
2553			/* path must be released before calling iterate()! */
2554			btrfs_debug(fs_root->fs_info,
2555				"following ref at offset %u for inode %llu in tree %llu",
2556				cur, found_key.objectid,
2557				fs_root->root_key.objectid);
2558			ret = inode_to_path(parent, name_len,
2559				      (unsigned long)(iref + 1), eb, ipath);
2560			if (ret)
2561				break;
2562			len = sizeof(*iref) + name_len;
2563			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2564		}
2565		free_extent_buffer(eb);
2566	}
2567
2568	btrfs_release_path(path);
2569
2570	return ret;
2571}
2572
2573static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2574{
2575	int ret;
2576	int slot;
2577	u64 offset = 0;
2578	u64 parent;
2579	int found = 0;
2580	struct btrfs_root *fs_root = ipath->fs_root;
2581	struct btrfs_path *path = ipath->btrfs_path;
2582	struct extent_buffer *eb;
2583	struct btrfs_inode_extref *extref;
2584	u32 item_size;
2585	u32 cur_offset;
2586	unsigned long ptr;
2587
2588	while (1) {
2589		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2590					    &offset);
2591		if (ret < 0)
2592			break;
2593		if (ret) {
2594			ret = found ? 0 : -ENOENT;
2595			break;
2596		}
2597		++found;
2598
2599		slot = path->slots[0];
2600		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2601		if (!eb) {
2602			ret = -ENOMEM;
2603			break;
2604		}
2605		btrfs_release_path(path);
2606
2607		item_size = btrfs_item_size(eb, slot);
2608		ptr = btrfs_item_ptr_offset(eb, slot);
2609		cur_offset = 0;
2610
2611		while (cur_offset < item_size) {
2612			u32 name_len;
2613
2614			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2615			parent = btrfs_inode_extref_parent(eb, extref);
2616			name_len = btrfs_inode_extref_name_len(eb, extref);
2617			ret = inode_to_path(parent, name_len,
2618				      (unsigned long)&extref->name, eb, ipath);
2619			if (ret)
2620				break;
2621
2622			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2623			cur_offset += sizeof(*extref);
2624		}
2625		free_extent_buffer(eb);
2626
2627		offset++;
2628	}
2629
2630	btrfs_release_path(path);
2631
2632	return ret;
2633}
2634
2635/*
2636 * returns 0 if the path could be dumped (probably truncated)
2637 * returns <0 in case of an error
2638 */
2639static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2640			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2641{
2642	char *fspath;
2643	char *fspath_min;
2644	int i = ipath->fspath->elem_cnt;
2645	const int s_ptr = sizeof(char *);
2646	u32 bytes_left;
2647
2648	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2649					ipath->fspath->bytes_left - s_ptr : 0;
2650
2651	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2652	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2653				   name_off, eb, inum, fspath_min, bytes_left);
2654	if (IS_ERR(fspath))
2655		return PTR_ERR(fspath);
2656
2657	if (fspath > fspath_min) {
2658		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2659		++ipath->fspath->elem_cnt;
2660		ipath->fspath->bytes_left = fspath - fspath_min;
2661	} else {
2662		++ipath->fspath->elem_missed;
2663		ipath->fspath->bytes_missing += fspath_min - fspath;
2664		ipath->fspath->bytes_left = 0;
2665	}
2666
2667	return 0;
2668}
2669
2670/*
2671 * this dumps all file system paths to the inode into the ipath struct, provided
2672 * it has been created large enough. each path is zero-terminated and accessed
2673 * from ipath->fspath->val[i].
2674 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2675 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2676 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2677 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2678 * have been needed to return all paths.
2679 */
2680int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2681{
2682	int ret;
2683	int found_refs = 0;
2684
2685	ret = iterate_inode_refs(inum, ipath);
2686	if (!ret)
2687		++found_refs;
2688	else if (ret != -ENOENT)
2689		return ret;
2690
2691	ret = iterate_inode_extrefs(inum, ipath);
2692	if (ret == -ENOENT && found_refs)
2693		return 0;
2694
2695	return ret;
2696}
2697
2698struct btrfs_data_container *init_data_container(u32 total_bytes)
2699{
2700	struct btrfs_data_container *data;
2701	size_t alloc_bytes;
2702
2703	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2704	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2705	if (!data)
2706		return ERR_PTR(-ENOMEM);
2707
2708	if (total_bytes >= sizeof(*data)) {
2709		data->bytes_left = total_bytes - sizeof(*data);
2710		data->bytes_missing = 0;
2711	} else {
2712		data->bytes_missing = sizeof(*data) - total_bytes;
2713		data->bytes_left = 0;
2714	}
2715
2716	data->elem_cnt = 0;
2717	data->elem_missed = 0;
2718
2719	return data;
2720}
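
Editor's note, as a worked example of the bookkeeping above: with
total_bytes = sizeof(*data) + 50, bytes_left starts at 50. Each record that
build_ino_list() stores consumes 3 * sizeof(u64) = 24 bytes, so two records
fit (elem_cnt = 6, bytes_left = 2); a third record then sets bytes_missing to
24 - 2 = 22, bytes_left to 0 and elem_missed to 3, telling the caller exactly
how much bigger the container needed to be.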
2721
2722/*
2723 * allocates space to return multiple file system paths for an inode.
2724 * total_bytes to allocate are passed, note that space usable for actual path
2725 * information will be total_bytes - sizeof(struct inode_fs_paths).
2726 * the returned pointer must be freed with free_ipath() in the end.
2727 */
2728struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2729					struct btrfs_path *path)
2730{
2731	struct inode_fs_paths *ifp;
2732	struct btrfs_data_container *fspath;
2733
2734	fspath = init_data_container(total_bytes);
2735	if (IS_ERR(fspath))
2736		return ERR_CAST(fspath);
2737
2738	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2739	if (!ifp) {
2740		kvfree(fspath);
2741		return ERR_PTR(-ENOMEM);
2742	}
2743
2744	ifp->btrfs_path = path;
2745	ifp->fspath = fspath;
2746	ifp->fs_root = fs_root;
2747
2748	return ifp;
2749}
2750
2751void free_ipath(struct inode_fs_paths *ipath)
2752{
2753	if (!ipath)
2754		return;
2755	kvfree(ipath->fspath);
2756	kfree(ipath);
2757}
2758
2759struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2760{
2761	struct btrfs_backref_iter *ret;
2762
2763	ret = kzalloc(sizeof(*ret), GFP_NOFS);
2764	if (!ret)
2765		return NULL;
2766
2767	ret->path = btrfs_alloc_path();
2768	if (!ret->path) {
2769		kfree(ret);
2770		return NULL;
2771	}
2772
2773	/* Current backref iterator only supports iteration in commit root */
2774	ret->path->search_commit_root = 1;
2775	ret->path->skip_locking = 1;
2776	ret->fs_info = fs_info;
2777
2778	return ret;
2779}
2780
2781int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2782{
2783	struct btrfs_fs_info *fs_info = iter->fs_info;
2784	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2785	struct btrfs_path *path = iter->path;
2786	struct btrfs_extent_item *ei;
2787	struct btrfs_key key;
2788	int ret;
2789
2790	key.objectid = bytenr;
2791	key.type = BTRFS_METADATA_ITEM_KEY;
2792	key.offset = (u64)-1;
2793	iter->bytenr = bytenr;
2794
2795	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2796	if (ret < 0)
2797		return ret;
2798	if (ret == 0) {
2799		ret = -EUCLEAN;
2800		goto release;
2801	}
2802	if (path->slots[0] == 0) {
2803		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2804		ret = -EUCLEAN;
2805		goto release;
2806	}
2807	path->slots[0]--;
2808
2809	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2810	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2811	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2812		ret = -ENOENT;
2813		goto release;
2814	}
2815	memcpy(&iter->cur_key, &key, sizeof(key));
2816	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2817						    path->slots[0]);
2818	iter->end_ptr = (u32)(iter->item_ptr +
2819			btrfs_item_size(path->nodes[0], path->slots[0]));
2820	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2821			    struct btrfs_extent_item);
2822
2823	/*
2824	 * Only iteration on tree backrefs is supported for now.
2825	 *
2826	 * This is an extra precaution for non skinny-metadata, where
2827	 * EXTENT_ITEM is also used for tree blocks, so we can only use
2828	 * extent flags to determine if it's a tree block.
2829	 */
2830	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2831		ret = -ENOTSUPP;
2832		goto release;
2833	}
2834	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2835
2836	/* If there is no inline backref, go search for keyed backref */
2837	if (iter->cur_ptr >= iter->end_ptr) {
2838		ret = btrfs_next_item(extent_root, path);
2839
2840		/* No inline nor keyed ref */
2841		if (ret > 0) {
2842			ret = -ENOENT;
2843			goto release;
2844		}
2845		if (ret < 0)
2846			goto release;
2847
2848		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2849				path->slots[0]);
2850		if (iter->cur_key.objectid != bytenr ||
2851		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2852		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2853			ret = -ENOENT;
2854			goto release;
2855		}
2856		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2857							   path->slots[0]);
2858		iter->item_ptr = iter->cur_ptr;
2859		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2860				      path->nodes[0], path->slots[0]));
2861	}
2862
2863	return 0;
2864release:
2865	btrfs_backref_iter_release(iter);
2866	return ret;
2867}
2868
2869/*
2870 * Go to the next backref item of the current bytenr; it can be either
2871 * inlined or keyed.
2872 *
2873 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2874 *
2875 * Return 0 if we get next backref without problem.
2876 * Return >0 if there is no extra backref for this bytenr.
2877 * Return <0 if something went wrong.
2878 */
2879int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2880{
2881	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2882	struct btrfs_root *extent_root;
2883	struct btrfs_path *path = iter->path;
2884	struct btrfs_extent_inline_ref *iref;
2885	int ret;
2886	u32 size;
2887
2888	if (btrfs_backref_iter_is_inline_ref(iter)) {
2889		/* We're still inside the inline refs */
2890		ASSERT(iter->cur_ptr < iter->end_ptr);
2891
2892		if (btrfs_backref_has_tree_block_info(iter)) {
2893			/* First tree block info */
2894			size = sizeof(struct btrfs_tree_block_info);
2895		} else {
2896			/* Use inline ref type to determine the size */
2897			int type;
2898
2899			iref = (struct btrfs_extent_inline_ref *)
2900				((unsigned long)iter->cur_ptr);
2901			type = btrfs_extent_inline_ref_type(eb, iref);
2902
2903			size = btrfs_extent_inline_ref_size(type);
2904		}
2905		iter->cur_ptr += size;
2906		if (iter->cur_ptr < iter->end_ptr)
2907			return 0;
2908
2909		/* All inline items iterated, fall through */
2910	}
2911
2912	/* We're at keyed items, there is no inline item, go to the next one */
2913	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
2914	ret = btrfs_next_item(extent_root, iter->path);
2915	if (ret)
2916		return ret;
2917
2918	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2919	if (iter->cur_key.objectid != iter->bytenr ||
2920	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2921	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2922		return 1;
2923	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2924					path->slots[0]);
2925	iter->cur_ptr = iter->item_ptr;
2926	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
2927						path->slots[0]);
2928	return 0;
2929}
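
Editor's sketch (not part of the kernel source): driving the iterator pair
above over a tree block's backrefs. example_iter_tree_block is hypothetical;
note that the iterator rejects data extents with -ENOTSUPP and only walks the
commit root.

	static int example_iter_tree_block(struct btrfs_fs_info *fs_info,
					   u64 bytenr)
	{
		struct btrfs_backref_iter *iter;
		int ret;

		iter = btrfs_backref_iter_alloc(fs_info);
		if (!iter)
			return -ENOMEM;

		for (ret = btrfs_backref_iter_start(iter, bytenr);
		     ret == 0; ret = btrfs_backref_iter_next(iter))
			pr_info("backref key (%llu %u %llu)\n",
				iter->cur_key.objectid, iter->cur_key.type,
				iter->cur_key.offset);

		btrfs_backref_iter_release(iter);
		btrfs_free_path(iter->path);
		kfree(iter);
		return ret < 0 ? ret : 0;
	}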
2930
2931void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2932			      struct btrfs_backref_cache *cache, int is_reloc)
2933{
2934	int i;
2935
2936	cache->rb_root = RB_ROOT;
2937	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2938		INIT_LIST_HEAD(&cache->pending[i]);
2939	INIT_LIST_HEAD(&cache->changed);
2940	INIT_LIST_HEAD(&cache->detached);
2941	INIT_LIST_HEAD(&cache->leaves);
2942	INIT_LIST_HEAD(&cache->pending_edge);
2943	INIT_LIST_HEAD(&cache->useless_node);
2944	cache->fs_info = fs_info;
2945	cache->is_reloc = is_reloc;
2946}
2947
2948struct btrfs_backref_node *btrfs_backref_alloc_node(
2949		struct btrfs_backref_cache *cache, u64 bytenr, int level)
2950{
2951	struct btrfs_backref_node *node;
2952
2953	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2954	node = kzalloc(sizeof(*node), GFP_NOFS);
2955	if (!node)
2956		return node;
2957
2958	INIT_LIST_HEAD(&node->list);
2959	INIT_LIST_HEAD(&node->upper);
2960	INIT_LIST_HEAD(&node->lower);
2961	RB_CLEAR_NODE(&node->rb_node);
2962	cache->nr_nodes++;
2963	node->level = level;
2964	node->bytenr = bytenr;
2965
2966	return node;
2967}
2968
2969struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2970		struct btrfs_backref_cache *cache)
2971{
2972	struct btrfs_backref_edge *edge;
2973
2974	edge = kzalloc(sizeof(*edge), GFP_NOFS);
2975	if (edge)
2976		cache->nr_edges++;
2977	return edge;
2978}
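
Editor's sketch (not part of the kernel source): the alloc/link/free
discipline for edges, assuming two already-allocated backref nodes. Once
linked via btrfs_backref_link_edge() (as the handlers below do), the edge is
owned by the nodes; until then any failure path must hand it back with
btrfs_backref_free_edge() so that cache->nr_edges stays balanced.
example_link is hypothetical.

	static int example_link(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *lower,
				struct btrfs_backref_node *upper)
	{
		struct btrfs_backref_edge *edge;

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge)
			return -ENOMEM;
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
		return 0;
	}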
2979
2980/*
2981 * Drop the backref node from cache, also cleaning up all its
2982 * upper edges and any uncached nodes in the path.
2983 *
2984 * This cleanup happens bottom up, thus the node should either
2985 * be the lowest node in the cache or a detached node.
2986 */
2987void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2988				struct btrfs_backref_node *node)
2989{
2990	struct btrfs_backref_node *upper;
2991	struct btrfs_backref_edge *edge;
2992
2993	if (!node)
2994		return;
2995
2996	BUG_ON(!node->lowest && !node->detached);
2997	while (!list_empty(&node->upper)) {
2998		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2999				  list[LOWER]);
3000		upper = edge->node[UPPER];
3001		list_del(&edge->list[LOWER]);
3002		list_del(&edge->list[UPPER]);
3003		btrfs_backref_free_edge(cache, edge);
3004
3005		/*
3006		 * Add the node to leaf node list if no other child block
3007		 * cached.
3008		 */
3009		if (list_empty(&upper->lower)) {
3010			list_add_tail(&upper->lower, &cache->leaves);
3011			upper->lowest = 1;
3012		}
3013	}
3014
3015	btrfs_backref_drop_node(cache, node);
3016}
3017
3018/*
3019 * Release all nodes/edges from current cache
3020 */
3021void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3022{
3023	struct btrfs_backref_node *node;
3024	int i;
3025
3026	while (!list_empty(&cache->detached)) {
3027		node = list_entry(cache->detached.next,
3028				  struct btrfs_backref_node, list);
3029		btrfs_backref_cleanup_node(cache, node);
3030	}
3031
3032	while (!list_empty(&cache->leaves)) {
3033		node = list_entry(cache->leaves.next,
3034				  struct btrfs_backref_node, lower);
3035		btrfs_backref_cleanup_node(cache, node);
3036	}
3037
3038	cache->last_trans = 0;
3039
3040	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3041		ASSERT(list_empty(&cache->pending[i]));
3042	ASSERT(list_empty(&cache->pending_edge));
3043	ASSERT(list_empty(&cache->useless_node));
3044	ASSERT(list_empty(&cache->changed));
3045	ASSERT(list_empty(&cache->detached));
3046	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3047	ASSERT(!cache->nr_nodes);
3048	ASSERT(!cache->nr_edges);
3049}
3050
3051/*
3052 * Handle direct tree backref
3053 *
3054 * Direct tree backref means the backref item shows its parent bytenr
3055 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
3056 *
3057 * @ref_key:	The converted backref key.
3058 *		For keyed backref, it's the item key.
3059 *		For inlined backref, objectid is the bytenr,
3060 *		type is btrfs_inline_ref_type, offset is
3061 *		btrfs_inline_ref_offset.
3062 */
3063static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3064				      struct btrfs_key *ref_key,
3065				      struct btrfs_backref_node *cur)
3066{
3067	struct btrfs_backref_edge *edge;
3068	struct btrfs_backref_node *upper;
3069	struct rb_node *rb_node;
3070
3071	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3072
3073	/* Only reloc root uses backref pointing to itself */
3074	if (ref_key->objectid == ref_key->offset) {
3075		struct btrfs_root *root;
3076
3077		cur->is_reloc_root = 1;
3078		/* Only reloc backref cache cares about a specific root */
3079		if (cache->is_reloc) {
3080			root = find_reloc_root(cache->fs_info, cur->bytenr);
3081			if (!root)
3082				return -ENOENT;
3083			cur->root = root;
3084		} else {
3085			/*
3086			 * For generic purpose backref cache, reloc root node
3087			 * is useless.
3088			 */
3089			list_add(&cur->list, &cache->useless_node);
3090		}
3091		return 0;
3092	}
3093
3094	edge = btrfs_backref_alloc_edge(cache);
3095	if (!edge)
3096		return -ENOMEM;
3097
3098	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3099	if (!rb_node) {
3100		/* Parent node not yet cached */
3101		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3102					   cur->level + 1);
3103		if (!upper) {
3104			btrfs_backref_free_edge(cache, edge);
3105			return -ENOMEM;
3106		}
3107
3108		/*
3109		 * The backref for the upper level block isn't cached, add the
3110		 * block to the pending list
3111		 */
3112		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3113	} else {
3114		/* Parent node already cached */
3115		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3116		ASSERT(upper->checked);
3117		INIT_LIST_HEAD(&edge->list[UPPER]);
3118	}
3119	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3120	return 0;
3121}
3122
3123/*
3124 * Handle indirect tree backref
3125 *
3126 * Indirect tree backref means we only know which tree the node belongs to.
3127 * We still need to do a tree search to find out the parents. This is for
3128 * TREE_BLOCK_REF backref (keyed or inlined).
3129 *
3130 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
3131 * @tree_key:	The first key of this tree block.
3132 * @path:	A clean (released) path, to avoid allocating a path every time
3133 *		the function gets called.
3134 */
3135static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
3136					struct btrfs_path *path,
3137					struct btrfs_key *ref_key,
3138					struct btrfs_key *tree_key,
3139					struct btrfs_backref_node *cur)
3140{
3141	struct btrfs_fs_info *fs_info = cache->fs_info;
3142	struct btrfs_backref_node *upper;
3143	struct btrfs_backref_node *lower;
3144	struct btrfs_backref_edge *edge;
3145	struct extent_buffer *eb;
3146	struct btrfs_root *root;
3147	struct rb_node *rb_node;
3148	int level;
3149	bool need_check = true;
3150	int ret;
3151
3152	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3153	if (IS_ERR(root))
3154		return PTR_ERR(root);
3155	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3156		cur->cowonly = 1;
3157
3158	if (btrfs_root_level(&root->root_item) == cur->level) {
3159		/* Tree root */
3160		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3161		/*
3162		 * For reloc backref cache, we may ignore reloc root.  But for
3163		 * general purpose backref cache, we can't rely on
3164		 * btrfs_should_ignore_reloc_root() as it may conflict with
3165		 * current running relocation and lead to missing root.
3166		 *
3167		 * For general purpose backref cache, reloc root detection is
3168		 * completely relying on direct backref (key->offset is parent
3169		 * bytenr), thus only do such check for reloc cache.
3170		 */
3171		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3172			btrfs_put_root(root);
3173			list_add(&cur->list, &cache->useless_node);
3174		} else {
3175			cur->root = root;
3176		}
3177		return 0;
3178	}
3179
3180	level = cur->level + 1;
3181
3182	/* Search the tree to find parent blocks referring to the block */
3183	path->search_commit_root = 1;
3184	path->skip_locking = 1;
3185	path->lowest_level = level;
3186	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3187	path->lowest_level = 0;
3188	if (ret < 0) {
3189		btrfs_put_root(root);
3190		return ret;
3191	}
3192	if (ret > 0 && path->slots[level] > 0)
3193		path->slots[level]--;
3194
3195	eb = path->nodes[level];
3196	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3197		btrfs_err(fs_info,
3198"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3199			  cur->bytenr, level - 1, root->root_key.objectid,
3200			  tree_key->objectid, tree_key->type, tree_key->offset);
3201		btrfs_put_root(root);
3202		ret = -ENOENT;
3203		goto out;
3204	}
3205	lower = cur;
3206
3207	/* Add all nodes and edges in the path */
3208	for (; level < BTRFS_MAX_LEVEL; level++) {
3209		if (!path->nodes[level]) {
3210			ASSERT(btrfs_root_bytenr(&root->root_item) ==
3211			       lower->bytenr);
3212			/* Same as previous should_ignore_reloc_root() call */
3213			if (btrfs_should_ignore_reloc_root(root) &&
3214			    cache->is_reloc) {
3215				btrfs_put_root(root);
3216				list_add(&lower->list, &cache->useless_node);
3217			} else {
3218				lower->root = root;
3219			}
3220			break;
3221		}
3222
3223		edge = btrfs_backref_alloc_edge(cache);
3224		if (!edge) {
3225			btrfs_put_root(root);
3226			ret = -ENOMEM;
3227			goto out;
3228		}
3229
3230		eb = path->nodes[level];
3231		rb_node = rb_simple_search(&cache->rb_root, eb->start);
3232		if (!rb_node) {
3233			upper = btrfs_backref_alloc_node(cache, eb->start,
3234							 lower->level + 1);
3235			if (!upper) {
3236				btrfs_put_root(root);
3237				btrfs_backref_free_edge(cache, edge);
3238				ret = -ENOMEM;
3239				goto out;
3240			}
3241			upper->owner = btrfs_header_owner(eb);
3242			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3243				upper->cowonly = 1;
3244
3245			/*
3246			 * If we know the block isn't shared we can avoid
3247			 * checking its backrefs.
3248			 */
3249			if (btrfs_block_can_be_shared(root, eb))
3250				upper->checked = 0;
3251			else
3252				upper->checked = 1;
3253
3254			/*
3255			 * Add the block to pending list if we need to check its
3256			 * backrefs, we only do this once while walking up a
3257			 * tree as we will catch anything else later on.
3258			 */
3259			if (!upper->checked && need_check) {
3260				need_check = false;
3261				list_add_tail(&edge->list[UPPER],
3262					      &cache->pending_edge);
3263			} else {
3264				if (upper->checked)
3265					need_check = true;
3266				INIT_LIST_HEAD(&edge->list[UPPER]);
3267			}
3268		} else {
3269			upper = rb_entry(rb_node, struct btrfs_backref_node,
3270					 rb_node);
3271			ASSERT(upper->checked);
3272			INIT_LIST_HEAD(&edge->list[UPPER]);
3273			if (!upper->owner)
3274				upper->owner = btrfs_header_owner(eb);
3275		}
3276		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3277
3278		if (rb_node) {
3279			btrfs_put_root(root);
3280			break;
3281		}
3282		lower = upper;
3283		upper = NULL;
3284	}
3285out:
3286	btrfs_release_path(path);
3287	return ret;
3288}
3289
3290/*
3291 * Add backref node @cur into @cache.
3292 *
3293 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3294 *	 links aren't yet bidirectional. Use
3295 *	 btrfs_backref_finish_upper_links() to finish such linkage.
3296 *
3297 * @path:	Released path for indirect tree backref lookup
3298 * @iter:	Released backref iter for extent tree search
3299 * @node_key:	The first key of the tree block
3300 */
3301int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
3302				struct btrfs_path *path,
3303				struct btrfs_backref_iter *iter,
3304				struct btrfs_key *node_key,
3305				struct btrfs_backref_node *cur)
3306{
3307	struct btrfs_fs_info *fs_info = cache->fs_info;
3308	struct btrfs_backref_edge *edge;
3309	struct btrfs_backref_node *exist;
3310	int ret;
3311
3312	ret = btrfs_backref_iter_start(iter, cur->bytenr);
3313	if (ret < 0)
3314		return ret;
3315	/*
3316	 * We skip the first btrfs_tree_block_info, as we don't use the key
3317	 * stored in it, but fetch it from the tree block
3318	 */
3319	if (btrfs_backref_has_tree_block_info(iter)) {
3320		ret = btrfs_backref_iter_next(iter);
3321		if (ret < 0)
3322			goto out;
3323		/* No extra backref? This means the tree block is corrupted */
3324		if (ret > 0) {
3325			ret = -EUCLEAN;
3326			goto out;
3327		}
3328	}
3329	WARN_ON(cur->checked);
3330	if (!list_empty(&cur->upper)) {
3331		/*
3332		 * The backref was added previously when processing backref of
3333		 * type BTRFS_TREE_BLOCK_REF_KEY
3334		 */
3335		ASSERT(list_is_singular(&cur->upper));
3336		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3337				  list[LOWER]);
3338		ASSERT(list_empty(&edge->list[UPPER]));
3339		exist = edge->node[UPPER];
3340		/*
3341		 * Add the upper level block to pending list if we need check
3342		 * its backrefs
3343		 */
3344		if (!exist->checked)
3345			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3346	} else {
3347		exist = NULL;
3348	}
3349
3350	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3351		struct extent_buffer *eb;
3352		struct btrfs_key key;
3353		int type;
3354
3355		cond_resched();
3356		eb = btrfs_backref_get_eb(iter);
3357
3358		key.objectid = iter->bytenr;
3359		if (btrfs_backref_iter_is_inline_ref(iter)) {
3360			struct btrfs_extent_inline_ref *iref;
3361
3362			/* Update key for inline backref */
3363			iref = (struct btrfs_extent_inline_ref *)
3364				((unsigned long)iter->cur_ptr);
3365			type = btrfs_get_extent_inline_ref_type(eb, iref,
3366							BTRFS_REF_TYPE_BLOCK);
3367			if (type == BTRFS_REF_TYPE_INVALID) {
3368				ret = -EUCLEAN;
3369				goto out;
3370			}
3371			key.type = type;
3372			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3373		} else {
3374			key.type = iter->cur_key.type;
3375			key.offset = iter->cur_key.offset;
3376		}
3377
3378		/*
3379		 * Parent node found and matches current inline ref, no need to
3380		 * rebuild this node for this inline ref
3381		 */
3382		if (exist &&
3383		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3384		      exist->owner == key.offset) ||
3385		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3386		      exist->bytenr == key.offset))) {
3387			exist = NULL;
3388			continue;
3389		}
3390
3391		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3392		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3393			ret = handle_direct_tree_backref(cache, &key, cur);
3394			if (ret < 0)
3395				goto out;
3396			continue;
3397		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3398			ret = -EINVAL;
3399			btrfs_print_v0_err(fs_info);
3400			btrfs_handle_fs_error(fs_info, ret, NULL);
3401			goto out;
3402		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3403			continue;
3404		}
3405
3406		/*
3407		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
3408		 * means the root objectid. We need to search the tree to get
3409		 * its parent bytenr.
3410		 */
3411		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3412						   cur);
3413		if (ret < 0)
3414			goto out;
3415	}
3416	ret = 0;
3417	cur->checked = 1;
3418	WARN_ON(exist);
3419out:
3420	btrfs_backref_iter_release(iter);
3421	return ret;
3422}
3423
3424/*
3425 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3426 */
3427int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3428				     struct btrfs_backref_node *start)
3429{
3430	struct list_head *useless_node = &cache->useless_node;
3431	struct btrfs_backref_edge *edge;
3432	struct rb_node *rb_node;
3433	LIST_HEAD(pending_edge);
3434
3435	ASSERT(start->checked);
3436
3437	/* Insert this node to cache if it's not COW-only */
3438	if (!start->cowonly) {
3439		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3440					   &start->rb_node);
3441		if (rb_node)
3442			btrfs_backref_panic(cache->fs_info, start->bytenr,
3443					    -EEXIST);
3444		list_add_tail(&start->lower, &cache->leaves);
3445	}
3446
3447	/*
3448	 * Use breadth first search to iterate all related edges.
3449	 *
3450	 * The starting points are all the edges of this node
3451	 */
3452	list_for_each_entry(edge, &start->upper, list[LOWER])
3453		list_add_tail(&edge->list[UPPER], &pending_edge);
3454
3455	while (!list_empty(&pending_edge)) {
3456		struct btrfs_backref_node *upper;
3457		struct btrfs_backref_node *lower;
3458
3459		edge = list_first_entry(&pending_edge,
3460				struct btrfs_backref_edge, list[UPPER]);
3461		list_del_init(&edge->list[UPPER]);
3462		upper = edge->node[UPPER];
3463		lower = edge->node[LOWER];
3464
3465		/* Parent is detached, no need to keep any edges */
3466		if (upper->detached) {
3467			list_del(&edge->list[LOWER]);
3468			btrfs_backref_free_edge(cache, edge);
3469
3470			/* Lower node is orphan, queue for cleanup */
3471			if (list_empty(&lower->upper))
3472				list_add(&lower->list, useless_node);
3473			continue;
3474		}
3475
3476		/*
3477		 * All new nodes added in current build_backref_tree() haven't
3478		 * been linked to the cache rb tree.
3479		 * So if we have upper->rb_node populated, this means a cache
3480		 * hit. We only need to link the edge, as @upper and all its
3481		 * parents have already been linked.
3482		 */
3483		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3484			if (upper->lowest) {
3485				list_del_init(&upper->lower);
3486				upper->lowest = 0;
3487			}
3488
3489			list_add_tail(&edge->list[UPPER], &upper->lower);
3490			continue;
3491		}
3492
3493		/* Sanity check, we shouldn't have any unchecked nodes */
3494		if (!upper->checked) {
3495			ASSERT(0);
3496			return -EUCLEAN;
3497		}
3498
3499		/* Sanity check, COW-only node has non-COW-only parent */
3500		if (start->cowonly != upper->cowonly) {
3501			ASSERT(0);
3502			return -EUCLEAN;
3503		}
3504
3505		/* Only cache non-COW-only (subvolume trees) tree blocks */
3506		if (!upper->cowonly) {
3507			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3508						   &upper->rb_node);
3509			if (rb_node) {
3510				btrfs_backref_panic(cache->fs_info,
3511						upper->bytenr, -EEXIST);
3512				return -EUCLEAN;
3513			}
3514		}
3515
3516		list_add_tail(&edge->list[UPPER], &upper->lower);
3517
3518		/*
3519		 * Also queue all the parent edges of this uncached node
3520		 * to finish the upper linkage
3521		 */
3522		list_for_each_entry(edge, &upper->upper, list[LOWER])
3523			list_add_tail(&edge->list[UPPER], &pending_edge);
3524	}
3525	return 0;
3526}
3527
3528void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3529				 struct btrfs_backref_node *node)
3530{
3531	struct btrfs_backref_node *lower;
3532	struct btrfs_backref_node *upper;
3533	struct btrfs_backref_edge *edge;
3534
3535	while (!list_empty(&cache->useless_node)) {
3536		lower = list_first_entry(&cache->useless_node,
3537				   struct btrfs_backref_node, list);
3538		list_del_init(&lower->list);
3539	}
3540	while (!list_empty(&cache->pending_edge)) {
3541		edge = list_first_entry(&cache->pending_edge,
3542				struct btrfs_backref_edge, list[UPPER]);
3543		list_del(&edge->list[UPPER]);
3544		list_del(&edge->list[LOWER]);
3545		lower = edge->node[LOWER];
3546		upper = edge->node[UPPER];
3547		btrfs_backref_free_edge(cache, edge);
3548
3549		/*
3550		 * Lower is no longer linked to any upper backref nodes and
3551		 * isn't in the cache, we can free it ourselves.
3552		 */
3553		if (list_empty(&lower->upper) &&
3554		    RB_EMPTY_NODE(&lower->rb_node))
3555			list_add(&lower->list, &cache->useless_node);
3556
3557		if (!RB_EMPTY_NODE(&upper->rb_node))
3558			continue;
3559
3560		/* Add this guy's upper edges to the list to process */
3561		list_for_each_entry(edge, &upper->upper, list[LOWER])
3562			list_add_tail(&edge->list[UPPER],
3563				      &cache->pending_edge);
3564		if (list_empty(&upper->upper))
3565			list_add(&upper->list, &cache->useless_node);
3566	}
3567
3568	while (!list_empty(&cache->useless_node)) {
3569		lower = list_first_entry(&cache->useless_node,
3570				   struct btrfs_backref_node, list);
3571		list_del_init(&lower->list);
3572		if (lower == node)
3573			node = NULL;
3574		btrfs_backref_drop_node(cache, lower);
3575	}
3576
3577	btrfs_backref_cleanup_node(cache, node);
3578	ASSERT(list_empty(&cache->useless_node) &&
3579	       list_empty(&cache->pending_edge));
3580}
 127		extent_type = btrfs_file_extent_type(eb, fi);
 128		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
 129			continue;
 130		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 131		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 132		if (disk_byte != ctx->bytenr)
 133			continue;
 134
 135		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
 136		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 137			return ret;
 138	}
 139
 140	return 0;
 141}
 142
 143struct preftree {
 144	struct rb_root_cached root;
 145	unsigned int count;
 146};
 147
 148#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 149
 150struct preftrees {
 151	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
 152	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
 153	struct preftree indirect_missing_keys;
 154};
 155
 156/*
 157 * Checks for a shared extent during backref search.
 158 *
 159 * The share_count tracks prelim_refs (direct and indirect) having a
 160 * ref->count >0:
 161 *  - incremented when a ref->count transitions to >0
 162 *  - decremented when a ref->count transitions to <1
 163 */
 164struct share_check {
 165	struct btrfs_backref_share_check_ctx *ctx;
 166	struct btrfs_root *root;
 167	u64 inum;
 168	u64 data_bytenr;
 169	u64 data_extent_gen;
 170	/*
 171	 * Counts number of inodes that refer to an extent (different inodes in
 172	 * the same root or different roots) that we could find. The sharedness
 173	 * check typically stops once this counter gets greater than 1, so it
 174	 * may not reflect the total number of inodes.
 175	 */
 176	int share_count;
 177	/*
 178	 * The number of times we found that our inode refers to the data
 179	 * extent whose sharedness we are determining. In other words, how many
 180	 * file extent items we could find for our inode that point to our
 181	 * target data extent. The value we get here after finishing the extent
 182	 * sharedness check may be smaller than reality, but if it ends up
 183	 * being greater than 1, then we know for sure the inode has multiple
 184	 * file extent items that point to our data extent, and we can safely
 185	 * assume it's useful to cache the sharedness check result.
 186	 */
 187	int self_ref_count;
 188	bool have_delayed_delete_refs;
 189};
 190
 191static inline int extent_is_shared(struct share_check *sc)
 192{
 193	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
 194}
 195
 196static struct kmem_cache *btrfs_prelim_ref_cache;
 197
 198int __init btrfs_prelim_ref_init(void)
 199{
 200	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
 201					sizeof(struct prelim_ref), 0, 0, NULL);
 202	if (!btrfs_prelim_ref_cache)
 203		return -ENOMEM;
 204	return 0;
 205}
 206
 207void __cold btrfs_prelim_ref_exit(void)
 208{
 209	kmem_cache_destroy(btrfs_prelim_ref_cache);
 210}
 211
 212static void free_pref(struct prelim_ref *ref)
 213{
 214	kmem_cache_free(btrfs_prelim_ref_cache, ref);
 215}
 216
 217/*
 218 * Return 0 when both refs are for the same block (and can be merged).
 219 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 220 * indicates a 'higher' block.
 221 */
 222static int prelim_ref_compare(const struct prelim_ref *ref1,
 223			      const struct prelim_ref *ref2)
 224{
 225	if (ref1->level < ref2->level)
 226		return -1;
 227	if (ref1->level > ref2->level)
 228		return 1;
 229	if (ref1->root_id < ref2->root_id)
 230		return -1;
 231	if (ref1->root_id > ref2->root_id)
 232		return 1;
 233	if (ref1->key_for_search.type < ref2->key_for_search.type)
 234		return -1;
 235	if (ref1->key_for_search.type > ref2->key_for_search.type)
 236		return 1;
 237	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
 238		return -1;
 239	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
 240		return 1;
 241	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
 242		return -1;
 243	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
 244		return 1;
 245	if (ref1->parent < ref2->parent)
 246		return -1;
 247	if (ref1->parent > ref2->parent)
 248		return 1;
 249
 250	return 0;
 251}
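
/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * original file): the comparator above defines a strict total order over
 * (level, root_id, key_for_search, parent), which is what lets the rb-tree
 * in prelim_ref_insert() both sort refs and detect mergeable duplicates.
 * Refs that differ in any field, e.g. only in ->parent, compare non-equal
 * and therefore stay as separate nodes.
 */
static inline bool prelim_refs_would_merge(const struct prelim_ref *a,
					   const struct prelim_ref *b)
{
	/* Merge candidates are exactly the pairs that compare equal. */
	return prelim_ref_compare(a, b) == 0;
}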
 252
 253static void update_share_count(struct share_check *sc, int oldcount,
 254			       int newcount, const struct prelim_ref *newref)
 255{
 256	if ((!sc) || (oldcount == 0 && newcount < 1))
 257		return;
 258
 259	if (oldcount > 0 && newcount < 1)
 260		sc->share_count--;
 261	else if (oldcount < 1 && newcount > 0)
 262		sc->share_count++;
 263
 264	if (newref->root_id == btrfs_root_id(sc->root) &&
 265	    newref->wanted_disk_byte == sc->data_bytenr &&
 266	    newref->key_for_search.objectid == sc->inum)
 267		sc->self_ref_count += newref->count;
 268}
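
/*
 * A minimal sketch of the zero-crossing accounting above, assuming a
 * hypothetical share_check whose root/inum do not match @ref (so that
 * self_ref_count stays untouched): an ADD followed by a full DROP of the
 * same ref leaves share_count where it started.
 */
static inline void sketch_share_count_round_trip(struct share_check *sc,
						 const struct prelim_ref *ref)
{
	update_share_count(sc, 0, 1, ref);	/* 0 -> 1: share_count++ */
	update_share_count(sc, 1, 0, ref);	/* 1 -> 0: share_count-- */
}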
 269
 270/*
 271 * Add @newref to the @root rbtree, merging identical refs.
 272 *
 273 * Callers should assume that newref has been freed after calling.
 274 */
 275static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 276			      struct preftree *preftree,
 277			      struct prelim_ref *newref,
 278			      struct share_check *sc)
 279{
 280	struct rb_root_cached *root;
 281	struct rb_node **p;
 282	struct rb_node *parent = NULL;
 283	struct prelim_ref *ref;
 284	int result;
 285	bool leftmost = true;
 286
 287	root = &preftree->root;
 288	p = &root->rb_root.rb_node;
 289
 290	while (*p) {
 291		parent = *p;
 292		ref = rb_entry(parent, struct prelim_ref, rbnode);
 293		result = prelim_ref_compare(ref, newref);
 294		if (result < 0) {
 295			p = &(*p)->rb_left;
 296		} else if (result > 0) {
 297			p = &(*p)->rb_right;
 298			leftmost = false;
 299		} else {
 300			/* Identical refs, merge them and free @newref */
 301			struct extent_inode_elem *eie = ref->inode_list;
 302
 303			while (eie && eie->next)
 304				eie = eie->next;
 305
 306			if (!eie)
 307				ref->inode_list = newref->inode_list;
 308			else
 309				eie->next = newref->inode_list;
 310			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
 311						     preftree->count);
 312			/*
 313			 * A delayed ref can have newref->count < 0.
 314			 * The ref->count is updated to follow any
 315			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
 316			 */
 317			update_share_count(sc, ref->count,
 318					   ref->count + newref->count, newref);
 319			ref->count += newref->count;
 320			free_pref(newref);
 321			return;
 322		}
 323	}
 324
 325	update_share_count(sc, 0, newref->count, newref);
 326	preftree->count++;
 327	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 328	rb_link_node(&newref->rbnode, parent, p);
 329	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 330}
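
/*
 * A minimal usage sketch for the insert-or-merge helper above, with
 * hypothetical refs @a and @b that compare equal: ownership of both
 * passes to the tree, and @b is freed by the merge path.
 */
static void __maybe_unused sketch_insert_identical_refs(
		const struct btrfs_fs_info *fs_info,
		struct preftree *tree,
		struct prelim_ref *a, struct prelim_ref *b)
{
	prelim_ref_insert(fs_info, tree, a, NULL);	/* inserted as a new node */
	prelim_ref_insert(fs_info, tree, b, NULL);	/* merged into @a's node, then freed */
}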
 331
 332/*
 333 * Release the entire tree.  We don't care about internal consistency so
 334 * just free everything and then reset the tree root.
 335 */
 336static void prelim_release(struct preftree *preftree)
 337{
 338	struct prelim_ref *ref, *next_ref;
 339
 340	rbtree_postorder_for_each_entry_safe(ref, next_ref,
 341					     &preftree->root.rb_root, rbnode) {
 342		free_inode_elem_list(ref->inode_list);
 343		free_pref(ref);
 344	}
 345
 346	preftree->root = RB_ROOT_CACHED;
 347	preftree->count = 0;
 348}
 349
 350/*
 351 * the rules for all callers of this function are:
 352 * - obtaining the parent is the goal
 353 * - if you add a key, you must know that it is a correct key
 354 * - if you cannot add the parent or a correct key, then we will look into the
 355 *   block later to set a correct key
 356 *
 357 * delayed refs
 358 * ============
 359 *        backref type | shared | indirect | shared | indirect
 360 * information         |   tree |     tree |   data |     data
 361 * --------------------+--------+----------+--------+----------
 362 *      parent logical |    y   |     -    |    -   |     -
 363 *      key to resolve |    -   |     y    |    y   |     y
 364 *  tree block logical |    -   |     -    |    -   |     -
 365 *  root for resolving |    y   |     y    |    y   |     y
 366 *
 367 * - column 1:       we have the parent -> done
 368 * - column 2, 3, 4: we use the key to find the parent
 369 *
 370 * on disk refs (inline or keyed)
 371 * ==============================
 372 *        backref type | shared | indirect | shared | indirect
 373 * information         |   tree |     tree |   data |     data
 374 * --------------------+--------+----------+--------+----------
 375 *      parent logical |    y   |     -    |    y   |     -
 376 *      key to resolve |    -   |     -    |    -   |     y
 377 *  tree block logical |    y   |     y    |    y   |     y
 378 *  root for resolving |    -   |     y    |    y   |     y
 379 *
 380 * - column 1, 3: we have the parent -> done
 381 * - column 2:    we take the first key from the block to find the parent
 382 *                (see add_missing_keys)
 383 * - column 4:    we use the key to find the parent
 384 *
 385 * additional information that's available but not required to find the parent
 386 * block might help in merging entries to gain some speed.
 387 */
 388static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
 389			  struct preftree *preftree, u64 root_id,
 390			  const struct btrfs_key *key, int level, u64 parent,
 391			  u64 wanted_disk_byte, int count,
 392			  struct share_check *sc, gfp_t gfp_mask)
 393{
 394	struct prelim_ref *ref;
 395
 396	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
 397		return 0;
 398
 399	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 400	if (!ref)
 401		return -ENOMEM;
 402
 403	ref->root_id = root_id;
 404	if (key)
 405		ref->key_for_search = *key;
 406	else
 407		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 408
 409	ref->inode_list = NULL;
 410	ref->level = level;
 411	ref->count = count;
 412	ref->parent = parent;
 413	ref->wanted_disk_byte = wanted_disk_byte;
 414	prelim_ref_insert(fs_info, preftree, ref, sc);
 415	return extent_is_shared(sc);
 416}
 417
 418/* direct refs use root == 0, key == NULL */
 419static int add_direct_ref(const struct btrfs_fs_info *fs_info,
 420			  struct preftrees *preftrees, int level, u64 parent,
 421			  u64 wanted_disk_byte, int count,
 422			  struct share_check *sc, gfp_t gfp_mask)
 423{
 424	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
 425			      parent, wanted_disk_byte, count, sc, gfp_mask);
 426}
 427
 428/* indirect refs use parent == 0 */
 429static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
 430			    struct preftrees *preftrees, u64 root_id,
 431			    const struct btrfs_key *key, int level,
 432			    u64 wanted_disk_byte, int count,
 433			    struct share_check *sc, gfp_t gfp_mask)
 434{
 435	struct preftree *tree = &preftrees->indirect;
 436
 437	if (!key)
 438		tree = &preftrees->indirect_missing_keys;
 439	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
 440			      wanted_disk_byte, count, sc, gfp_mask);
 441}
 442
 443static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
 444{
 445	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
 446	struct rb_node *parent = NULL;
 447	struct prelim_ref *ref = NULL;
 448	struct prelim_ref target = {};
 449	int result;
 450
 451	target.parent = bytenr;
 452
 453	while (*p) {
 454		parent = *p;
 455		ref = rb_entry(parent, struct prelim_ref, rbnode);
 456		result = prelim_ref_compare(ref, &target);
 457
 458		if (result < 0)
 459			p = &(*p)->rb_left;
 460		else if (result > 0)
 461			p = &(*p)->rb_right;
 462		else
 463			return 1;
 464	}
 465	return 0;
 466}
 467
 468static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
 469			   struct btrfs_root *root, struct btrfs_path *path,
 470			   struct ulist *parents,
 471			   struct preftrees *preftrees, struct prelim_ref *ref,
 472			   int level)
 473{
 474	int ret = 0;
 475	int slot;
 476	struct extent_buffer *eb;
 477	struct btrfs_key key;
 478	struct btrfs_key *key_for_search = &ref->key_for_search;
 479	struct btrfs_file_extent_item *fi;
 480	struct extent_inode_elem *eie = NULL, *old = NULL;
 481	u64 disk_byte;
 482	u64 wanted_disk_byte = ref->wanted_disk_byte;
 483	u64 count = 0;
 484	u64 data_offset;
 485	u8 type;
 486
 487	if (level != 0) {
 488		eb = path->nodes[level];
 489		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 490		if (ret < 0)
 491			return ret;
 492		return 0;
 493	}
 494
 495	/*
 496	 * 1. We normally enter this function with the path already pointing
 497	 *    to the first item to check. But sometimes, we may enter it with
 498	 *    slot == nritems.
 499	 * 2. We are searching for a normal backref but the bytenr of this
 500	 *    leaf matches a shared data backref.
 501	 * 3. The leaf owner is not equal to the root we are searching for.
 502	 *
 503	 * For these cases, go to the next leaf before we continue.
 504	 */
 505	eb = path->nodes[0];
 506	if (path->slots[0] >= btrfs_header_nritems(eb) ||
 507	    is_shared_data_backref(preftrees, eb->start) ||
 508	    ref->root_id != btrfs_header_owner(eb)) {
 509		if (ctx->time_seq == BTRFS_SEQ_LAST)
 510			ret = btrfs_next_leaf(root, path);
 511		else
 512			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
 513	}
 514
 515	while (!ret && count < ref->count) {
 516		eb = path->nodes[0];
 517		slot = path->slots[0];
 518
 519		btrfs_item_key_to_cpu(eb, &key, slot);
 520
 521		if (key.objectid != key_for_search->objectid ||
 522		    key.type != BTRFS_EXTENT_DATA_KEY)
 523			break;
 524
 525		/*
 526		 * We are searching for a normal backref but the bytenr of this
 527		 * leaf matches a shared data backref, OR
 528		 * the leaf owner is not equal to the root we are searching for.
 529		 */
 530		if (slot == 0 &&
 531		    (is_shared_data_backref(preftrees, eb->start) ||
 532		     ref->root_id != btrfs_header_owner(eb))) {
 533			if (ctx->time_seq == BTRFS_SEQ_LAST)
 534				ret = btrfs_next_leaf(root, path);
 535			else
 536				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
 537			continue;
 538		}
 539		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 540		type = btrfs_file_extent_type(eb, fi);
 541		if (type == BTRFS_FILE_EXTENT_INLINE)
 542			goto next;
 543		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 544		data_offset = btrfs_file_extent_offset(eb, fi);
 545
 546		if (disk_byte == wanted_disk_byte) {
 547			eie = NULL;
 548			old = NULL;
 549			if (ref->key_for_search.offset == key.offset - data_offset)
 550				count++;
 551			else
 552				goto next;
 553			if (!ctx->skip_inode_ref_list) {
 554				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
 555				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
 556				    ret < 0)
 557					break;
 558			}
 559			if (ret > 0)
 560				goto next;
 561			ret = ulist_add_merge_ptr(parents, eb->start,
 562						  eie, (void **)&old, GFP_NOFS);
 563			if (ret < 0)
 564				break;
 565			if (!ret && !ctx->skip_inode_ref_list) {
 566				while (old->next)
 567					old = old->next;
 568				old->next = eie;
 569			}
 570			eie = NULL;
 571		}
 572next:
 573		if (ctx->time_seq == BTRFS_SEQ_LAST)
 574			ret = btrfs_next_item(root, path);
 575		else
 576			ret = btrfs_next_old_item(root, path, ctx->time_seq);
 577	}
 578
 579	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 580		free_inode_elem_list(eie);
 581	else if (ret > 0)
 582		ret = 0;
 583
 584	return ret;
 585}
 586
 587/*
 588 * resolve an indirect backref in the form (root_id, key, level)
 589 * to a logical address
 590 */
 591static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
 592				struct btrfs_path *path,
 593				struct preftrees *preftrees,
 594				struct prelim_ref *ref, struct ulist *parents)
 595{
 596	struct btrfs_root *root;
 597	struct extent_buffer *eb;
 598	int ret = 0;
 599	int root_level;
 600	int level = ref->level;
 601	struct btrfs_key search_key = ref->key_for_search;
 602
 603	/*
 604	 * If we're using search_commit_root we could possibly be holding locks
 605	 * on other tree nodes.  This happens when the qgroup code does backref
 606	 * walks while adding new delayed refs.  To deal with this we need to
 607	 * look the root up in the cache, and if we don't find it there we need
 608	 * to search the tree_root's commit root, thus the
 609	 * btrfs_get_fs_root_commit_root() usage here.
 610	 */
 611	if (path->search_commit_root)
 612		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
 613	else
 614		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
 615	if (IS_ERR(root)) {
 616		ret = PTR_ERR(root);
 617		goto out_free;
 618	}
 619
 620	if (!path->search_commit_root &&
 621	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
 622		ret = -ENOENT;
 623		goto out;
 624	}
 625
 626	if (btrfs_is_testing(ctx->fs_info)) {
 627		ret = -ENOENT;
 628		goto out;
 629	}
 630
 631	if (path->search_commit_root)
 632		root_level = btrfs_header_level(root->commit_root);
 633	else if (ctx->time_seq == BTRFS_SEQ_LAST)
 634		root_level = btrfs_header_level(root->node);
 635	else
 636		root_level = btrfs_old_root_level(root, ctx->time_seq);
 637
 638	if (root_level + 1 == level)
 639		goto out;
 640
 641	/*
 642	 * We can often find data backrefs with an offset that is too large
 643	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
 644	 * subtracting a file's offset with the data offset of its
 645	 * corresponding extent data item. This can happen for example in the
 646	 * clone ioctl.
 647	 *
 648	 * So if we detect such a case we set the search key's offset to zero
 649	 * to make sure we will find the matching file extent item at
 650	 * add_all_parents(), otherwise we will miss it because the offset
 651	 * taken from the backref is much larger than the offset of the file
 652	 * extent item. This can make us scan a very large number of file
 653	 * extent items, but at least it will not make us miss any.
 654	 *
 655	 * This is an ugly workaround for a behaviour that should have never
 656	 * existed, but it does and a fix for the clone ioctl would touch a lot
 657	 * of places, cause backwards incompatibility and would not fix the
 658	 * problem for extents cloned with older kernels.
 659	 */
 660	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
 661	    search_key.offset >= LLONG_MAX)
 662		search_key.offset = 0;
 663	path->lowest_level = level;
 664	if (ctx->time_seq == BTRFS_SEQ_LAST)
 665		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
 666	else
 667		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
 668
 669	btrfs_debug(ctx->fs_info,
 670		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 671		 ref->root_id, level, ref->count, ret,
 672		 ref->key_for_search.objectid, ref->key_for_search.type,
 673		 ref->key_for_search.offset);
 674	if (ret < 0)
 675		goto out;
 676
 677	eb = path->nodes[level];
 678	while (!eb) {
 679		if (WARN_ON(!level)) {
 680			ret = 1;
 681			goto out;
 682		}
 683		level--;
 684		eb = path->nodes[level];
 685	}
 686
 687	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
 688out:
 689	btrfs_put_root(root);
 690out_free:
 691	path->lowest_level = 0;
 692	btrfs_release_path(path);
 693	return ret;
 694}
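
/*
 * A worked example (with made-up numbers) of the underflow the zeroed
 * search key above guards against: a clone that references an extent at
 * file offset 0 with an 8KiB extent data offset yields a backref offset
 * of 0 - 8192, which as a u64 wraps around to a value far above
 * LLONG_MAX.
 */
static inline u64 sketch_underflowed_backref_offset(void)
{
	const u64 file_offset = 0;	/* hypothetical clone destination offset */
	const u64 data_offset = 8192;	/* hypothetical extent data offset */

	return file_offset - data_offset;	/* 0xffffffffffffe000 > LLONG_MAX */
}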
 695
 696static struct extent_inode_elem *
 697unode_aux_to_inode_list(struct ulist_node *node)
 698{
 699	if (!node)
 700		return NULL;
 701	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 702}
 703
 704static void free_leaf_list(struct ulist *ulist)
 705{
 706	struct ulist_node *node;
 707	struct ulist_iterator uiter;
 708
 709	ULIST_ITER_INIT(&uiter);
 710	while ((node = ulist_next(ulist, &uiter)))
 711		free_inode_elem_list(unode_aux_to_inode_list(node));
 712
 713	ulist_free(ulist);
 714}
 715
 716/*
 717 * We maintain three separate rbtrees: one for direct refs, one for
 718 * indirect refs which have a key, and one for indirect refs which do not
 719 * have a key. Each tree does merge on insertion.
 720 *
 721 * Once all of the references are located, we iterate over the tree of
 722 * indirect refs with missing keys. An appropriate key is located and
 723 * the ref is moved onto the tree for indirect refs. After all missing
 724 * keys are thus located, we iterate over the indirect ref tree, resolve
 725 * each reference, and then insert the resolved reference onto the
 726 * direct tree (merging there too).
 727 *
 728 * New backrefs (i.e., for parent nodes) are added to the appropriate
 729 * rbtree as they are encountered. The new backrefs are subsequently
 730 * resolved as above.
 731 */
 732static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
 733				 struct btrfs_path *path,
 734				 struct preftrees *preftrees,
 735				 struct share_check *sc)
 736{
 737	int err;
 738	int ret = 0;
 739	struct ulist *parents;
 740	struct ulist_node *node;
 741	struct ulist_iterator uiter;
 742	struct rb_node *rnode;
 743
 744	parents = ulist_alloc(GFP_NOFS);
 745	if (!parents)
 746		return -ENOMEM;
 747
 748	/*
 749	 * We could trade memory usage for performance here by iterating
 750	 * the tree, allocating new refs for each insertion, and then
 751	 * freeing the entire indirect tree when we're done.  In some test
 752	 * cases, the tree can grow quite large (~200k objects).
 753	 */
 754	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 755		struct prelim_ref *ref;
 756
 757		ref = rb_entry(rnode, struct prelim_ref, rbnode);
 758		if (WARN(ref->parent,
 759			 "BUG: direct ref found in indirect tree")) {
 760			ret = -EINVAL;
 761			goto out;
 762		}
 763
 764		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 765		preftrees->indirect.count--;
 766
 767		if (ref->count == 0) {
 768			free_pref(ref);
 769			continue;
 770		}
 771
 772		if (sc && ref->root_id != btrfs_root_id(sc->root)) {
 773			free_pref(ref);
 774			ret = BACKREF_FOUND_SHARED;
 775			goto out;
 776		}
 777		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
 778		/*
 779		 * We can only tolerate -ENOENT; otherwise we should catch the
 780		 * error and return it directly.
 781		 */
 782		if (err == -ENOENT) {
 783			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
 784					  NULL);
 785			continue;
 786		} else if (err) {
 787			free_pref(ref);
 788			ret = err;
 789			goto out;
 790		}
 791
 792		/* we put the first parent into the ref at hand */
 793		ULIST_ITER_INIT(&uiter);
 794		node = ulist_next(parents, &uiter);
 795		ref->parent = node ? node->val : 0;
 796		ref->inode_list = unode_aux_to_inode_list(node);
 797
 798		/* Add a prelim_ref(s) for any other parent(s). */
 799		while ((node = ulist_next(parents, &uiter))) {
 800			struct prelim_ref *new_ref;
 801
 802			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
 803						   GFP_NOFS);
 804			if (!new_ref) {
 805				free_pref(ref);
 806				ret = -ENOMEM;
 807				goto out;
 808			}
 809			memcpy(new_ref, ref, sizeof(*ref));
 810			new_ref->parent = node->val;
 811			new_ref->inode_list = unode_aux_to_inode_list(node);
 812			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
 813					  new_ref, NULL);
 814		}
 815
 816		/*
 817		 * Now it's a direct ref, put it in the direct tree. We must
 818		 * do this last because the ref could be merged/freed here.
 819		 */
 820		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
 821
 822		ulist_reinit(parents);
 823		cond_resched();
 824	}
 825out:
 826	/*
 827	 * We may have inode lists attached to refs in the parents ulist, so we
 828	 * must free them before freeing the ulist and its refs.
 829	 */
 830	free_leaf_list(parents);
 831	return ret;
 832}
 833
 834/*
 835 * read tree blocks and add keys where required.
 836 */
 837static int add_missing_keys(struct btrfs_fs_info *fs_info,
 838			    struct preftrees *preftrees, bool lock)
 839{
 840	struct prelim_ref *ref;
 841	struct extent_buffer *eb;
 842	struct preftree *tree = &preftrees->indirect_missing_keys;
 843	struct rb_node *node;
 844
 845	while ((node = rb_first_cached(&tree->root))) {
 846		struct btrfs_tree_parent_check check = { 0 };
 847
 848		ref = rb_entry(node, struct prelim_ref, rbnode);
 849		rb_erase_cached(node, &tree->root);
 850
 851		BUG_ON(ref->parent);	/* should not be a direct ref */
 852		BUG_ON(ref->key_for_search.type);
 853		BUG_ON(!ref->wanted_disk_byte);
 854
 855		check.level = ref->level - 1;
 856		check.owner_root = ref->root_id;
 857
 858		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
 859		if (IS_ERR(eb)) {
 860			free_pref(ref);
 861			return PTR_ERR(eb);
 862		}
 863		if (!extent_buffer_uptodate(eb)) {
 864			free_pref(ref);
 865			free_extent_buffer(eb);
 866			return -EIO;
 867		}
 868
 869		if (lock)
 870			btrfs_tree_read_lock(eb);
 871		if (btrfs_header_level(eb) == 0)
 872			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 873		else
 874			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
 875		if (lock)
 876			btrfs_tree_read_unlock(eb);
 877		free_extent_buffer(eb);
 878		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 879		cond_resched();
 880	}
 881	return 0;
 882}
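
/*
 * A condensed sketch (hypothetical driver, error handling elided) of the
 * three-tree pipeline described before resolve_indirect_refs();
 * find_parent_nodes() below performs these same two steps on its
 * preftrees:
 */
static int __maybe_unused sketch_resolve_pipeline(struct btrfs_backref_walk_ctx *ctx,
						  struct btrfs_path *path,
						  struct preftrees *preftrees)
{
	int ret;

	/* 1) Read a key for every indirect ref that is missing one. */
	ret = add_missing_keys(ctx->fs_info, preftrees, true);
	if (ret)
		return ret;
	/* 2) Resolve (root, key, level) tuples into parent bytenrs. */
	return resolve_indirect_refs(ctx, path, preftrees, NULL);
}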
 883
 884/*
 885 * Add all currently queued delayed refs from this head whose sequence
 886 * number is smaller than or equal to @seq to the preftrees.
 887 */
 888static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 889			    struct btrfs_delayed_ref_head *head, u64 seq,
 890			    struct preftrees *preftrees, struct share_check *sc)
 891{
 892	struct btrfs_delayed_ref_node *node;
 893	struct btrfs_key key;
 894	struct rb_node *n;
 895	int count;
 896	int ret = 0;
 897
 898	spin_lock(&head->lock);
 899	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 900		node = rb_entry(n, struct btrfs_delayed_ref_node,
 901				ref_node);
 902		if (node->seq > seq)
 903			continue;
 904
 905		switch (node->action) {
 906		case BTRFS_ADD_DELAYED_EXTENT:
 907		case BTRFS_UPDATE_DELAYED_HEAD:
 908			WARN_ON(1);
 909			continue;
 910		case BTRFS_ADD_DELAYED_REF:
 911			count = node->ref_mod;
 912			break;
 913		case BTRFS_DROP_DELAYED_REF:
 914			count = node->ref_mod * -1;
 915			break;
 916		default:
 917			BUG();
 918		}
 919		switch (node->type) {
 920		case BTRFS_TREE_BLOCK_REF_KEY: {
 921			/* NORMAL INDIRECT METADATA backref */
 922			struct btrfs_key *key_ptr = NULL;
 923			/* The owner of a tree block ref is the level. */
 924			int level = btrfs_delayed_ref_owner(node);
 925
 926			if (head->extent_op && head->extent_op->update_key) {
 927				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
 928				key_ptr = &key;
 929			}
 930
 931			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
 932					       key_ptr, level + 1, node->bytenr,
 933					       count, sc, GFP_ATOMIC);
 934			break;
 935		}
 936		case BTRFS_SHARED_BLOCK_REF_KEY: {
 937			/*
 938			 * SHARED DIRECT METADATA backref
 939			 *
 940			 * The owner of a tree block ref is the level.
 941			 */
 942			int level = btrfs_delayed_ref_owner(node);
 943
 944			ret = add_direct_ref(fs_info, preftrees, level + 1,
 945					     node->parent, node->bytenr, count,
 946					     sc, GFP_ATOMIC);
 947			break;
 948		}
 949		case BTRFS_EXTENT_DATA_REF_KEY: {
 950			/* NORMAL INDIRECT DATA backref */
 951			key.objectid = btrfs_delayed_ref_owner(node);
 952			key.type = BTRFS_EXTENT_DATA_KEY;
 953			key.offset = btrfs_delayed_ref_offset(node);
 954
 955			/*
 956			 * If we have a share check context and a reference for
 957			 * another inode, we can't exit immediately. This is
 958			 * because even if this is a BTRFS_ADD_DELAYED_REF
 959			 * reference we may find next a BTRFS_DROP_DELAYED_REF
 960			 * which cancels out this ADD reference.
 961			 *
 962			 * If this is a DROP reference and there was no previous
 963			 * ADD reference, then we need to signal that when we
 964			 * process references from the extent tree (through
 965			 * add_inline_refs() and add_keyed_refs()), we should
 966			 * not exit early if we find a reference for another
 967			 * inode, because one of the delayed DROP references
 968			 * may cancel that reference in the extent tree.
 969			 */
 970			if (sc && count < 0)
 971				sc->have_delayed_delete_refs = true;
 972
 973			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
 974					       &key, 0, node->bytenr, count, sc,
 975					       GFP_ATOMIC);
 976			break;
 977		}
 978		case BTRFS_SHARED_DATA_REF_KEY: {
 979			/* SHARED DIRECT FULL backref */
 980			ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
 981					     node->bytenr, count, sc,
 982					     GFP_ATOMIC);
 983			break;
 984		}
 985		default:
 986			WARN_ON(1);
 987		}
 988		/*
 989		 * We must ignore BACKREF_FOUND_SHARED until all delayed
 990		 * refs have been checked.
 991		 */
 992		if (ret && (ret != BACKREF_FOUND_SHARED))
 993			break;
 994	}
 995	if (!ret)
 996		ret = extent_is_shared(sc);
 997
 998	spin_unlock(&head->lock);
 999	return ret;
1000}
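
/*
 * A minimal sketch of the sign convention used above: a delayed ref
 * contributes +ref_mod for BTRFS_ADD_DELAYED_REF and -ref_mod for
 * BTRFS_DROP_DELAYED_REF, so queued add/drop pairs cancel out when the
 * resulting refs are merged in prelim_ref_insert().
 */
static inline int sketch_delayed_ref_count(int action, int ref_mod)
{
	return (action == BTRFS_DROP_DELAYED_REF) ? -ref_mod : ref_mod;
}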
1001
1002/*
1003 * add all inline backrefs for bytenr to the list
1004 *
1005 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1006 */
1007static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
1008			   struct btrfs_path *path,
1009			   int *info_level, struct preftrees *preftrees,
1010			   struct share_check *sc)
1011{
1012	int ret = 0;
1013	int slot;
1014	struct extent_buffer *leaf;
1015	struct btrfs_key key;
1016	struct btrfs_key found_key;
1017	unsigned long ptr;
1018	unsigned long end;
1019	struct btrfs_extent_item *ei;
1020	u64 flags;
1021	u64 item_size;
1022
1023	/*
1024	 * enumerate all inline refs
1025	 */
1026	leaf = path->nodes[0];
1027	slot = path->slots[0];
1028
1029	item_size = btrfs_item_size(leaf, slot);
1030	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
1031
1032	if (ctx->check_extent_item) {
1033		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
1034		if (ret)
1035			return ret;
1036	}
1037
1038	flags = btrfs_extent_flags(leaf, ei);
1039	btrfs_item_key_to_cpu(leaf, &found_key, slot);
1040
1041	ptr = (unsigned long)(ei + 1);
1042	end = (unsigned long)ei + item_size;
1043
1044	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1045	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1046		struct btrfs_tree_block_info *info;
1047
1048		info = (struct btrfs_tree_block_info *)ptr;
1049		*info_level = btrfs_tree_block_level(leaf, info);
1050		ptr += sizeof(struct btrfs_tree_block_info);
1051		BUG_ON(ptr > end);
1052	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
1053		*info_level = found_key.offset;
1054	} else {
1055		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1056	}
1057
1058	while (ptr < end) {
1059		struct btrfs_extent_inline_ref *iref;
1060		u64 offset;
1061		int type;
1062
1063		iref = (struct btrfs_extent_inline_ref *)ptr;
1064		type = btrfs_get_extent_inline_ref_type(leaf, iref,
1065							BTRFS_REF_TYPE_ANY);
1066		if (type == BTRFS_REF_TYPE_INVALID)
1067			return -EUCLEAN;
1068
1069		offset = btrfs_extent_inline_ref_offset(leaf, iref);
1070
1071		switch (type) {
1072		case BTRFS_SHARED_BLOCK_REF_KEY:
1073			ret = add_direct_ref(ctx->fs_info, preftrees,
1074					     *info_level + 1, offset,
1075					     ctx->bytenr, 1, NULL, GFP_NOFS);
1076			break;
1077		case BTRFS_SHARED_DATA_REF_KEY: {
1078			struct btrfs_shared_data_ref *sdref;
1079			int count;
1080
1081			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1082			count = btrfs_shared_data_ref_count(leaf, sdref);
1083
1084			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
1085					     ctx->bytenr, count, sc, GFP_NOFS);
1086			break;
1087		}
1088		case BTRFS_TREE_BLOCK_REF_KEY:
1089			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
1090					       NULL, *info_level + 1,
1091					       ctx->bytenr, 1, NULL, GFP_NOFS);
1092			break;
1093		case BTRFS_EXTENT_DATA_REF_KEY: {
1094			struct btrfs_extent_data_ref *dref;
1095			int count;
1096			u64 root;
1097
1098			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1099			count = btrfs_extent_data_ref_count(leaf, dref);
1100			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1101								      dref);
1102			key.type = BTRFS_EXTENT_DATA_KEY;
1103			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1104
1105			if (sc && key.objectid != sc->inum &&
1106			    !sc->have_delayed_delete_refs) {
1107				ret = BACKREF_FOUND_SHARED;
1108				break;
1109			}
1110
1111			root = btrfs_extent_data_ref_root(leaf, dref);
1112
1113			if (!ctx->skip_data_ref ||
1114			    !ctx->skip_data_ref(root, key.objectid, key.offset,
1115						ctx->user_ctx))
1116				ret = add_indirect_ref(ctx->fs_info, preftrees,
1117						       root, &key, 0, ctx->bytenr,
1118						       count, sc, GFP_NOFS);
1119			break;
1120		}
1121		case BTRFS_EXTENT_OWNER_REF_KEY:
1122			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
1123			break;
1124		default:
1125			WARN_ON(1);
1126		}
1127		if (ret)
1128			return ret;
1129		ptr += btrfs_extent_inline_ref_size(type);
1130	}
1131
1132	return 0;
1133}
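
/*
 * A minimal sketch of the inline ref walk above. The extent item is
 * followed by an optional btrfs_tree_block_info and then a packed run of
 * variable-size inline refs, so the cursor always advances by
 * btrfs_extent_inline_ref_size(type):
 *
 *   [btrfs_extent_item][tree_block_info?][iref 0][iref 1]...[item end]
 */
static int __maybe_unused sketch_count_inline_refs(struct extent_buffer *leaf,
						   unsigned long ptr,
						   unsigned long end)
{
	int nr = 0;

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;
		nr++;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return nr;
}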
1134
1135/*
1136 * add all non-inline backrefs for bytenr to the list
1137 *
1138 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1139 */
1140static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
1141			  struct btrfs_root *extent_root,
1142			  struct btrfs_path *path,
1143			  int info_level, struct preftrees *preftrees,
1144			  struct share_check *sc)
1145{
1146	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1147	int ret;
1148	int slot;
1149	struct extent_buffer *leaf;
1150	struct btrfs_key key;
1151
1152	while (1) {
1153		ret = btrfs_next_item(extent_root, path);
1154		if (ret < 0)
1155			break;
1156		if (ret) {
1157			ret = 0;
1158			break;
1159		}
1160
1161		slot = path->slots[0];
1162		leaf = path->nodes[0];
1163		btrfs_item_key_to_cpu(leaf, &key, slot);
1164
1165		if (key.objectid != ctx->bytenr)
1166			break;
1167		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1168			continue;
1169		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1170			break;
1171
1172		switch (key.type) {
1173		case BTRFS_SHARED_BLOCK_REF_KEY:
1174			/* SHARED DIRECT METADATA backref */
1175			ret = add_direct_ref(fs_info, preftrees,
1176					     info_level + 1, key.offset,
1177					     ctx->bytenr, 1, NULL, GFP_NOFS);
1178			break;
1179		case BTRFS_SHARED_DATA_REF_KEY: {
1180			/* SHARED DIRECT FULL backref */
1181			struct btrfs_shared_data_ref *sdref;
1182			int count;
1183
1184			sdref = btrfs_item_ptr(leaf, slot,
1185					      struct btrfs_shared_data_ref);
1186			count = btrfs_shared_data_ref_count(leaf, sdref);
1187			ret = add_direct_ref(fs_info, preftrees, 0,
1188					     key.offset, ctx->bytenr, count,
1189					     sc, GFP_NOFS);
1190			break;
1191		}
1192		case BTRFS_TREE_BLOCK_REF_KEY:
1193			/* NORMAL INDIRECT METADATA backref */
1194			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1195					       NULL, info_level + 1, ctx->bytenr,
1196					       1, NULL, GFP_NOFS);
1197			break;
1198		case BTRFS_EXTENT_DATA_REF_KEY: {
1199			/* NORMAL INDIRECT DATA backref */
1200			struct btrfs_extent_data_ref *dref;
1201			int count;
1202			u64 root;
1203
1204			dref = btrfs_item_ptr(leaf, slot,
1205					      struct btrfs_extent_data_ref);
1206			count = btrfs_extent_data_ref_count(leaf, dref);
1207			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1208								      dref);
1209			key.type = BTRFS_EXTENT_DATA_KEY;
1210			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1211
1212			if (sc && key.objectid != sc->inum &&
1213			    !sc->have_delayed_delete_refs) {
1214				ret = BACKREF_FOUND_SHARED;
1215				break;
1216			}
1217
1218			root = btrfs_extent_data_ref_root(leaf, dref);
1219
1220			if (!ctx->skip_data_ref ||
1221			    !ctx->skip_data_ref(root, key.objectid, key.offset,
1222						ctx->user_ctx))
1223				ret = add_indirect_ref(fs_info, preftrees, root,
1224						       &key, 0, ctx->bytenr,
1225						       count, sc, GFP_NOFS);
1226			break;
1227		}
1228		default:
1229			WARN_ON(1);
1230		}
1231		if (ret)
1232			return ret;
1233
1234	}
1235
1236	return ret;
1237}
1238
1239/*
1240 * The caller has joined a transaction or is holding a read lock on the
1241 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1242 * snapshot field changing while updating or checking the cache.
1243 */
1244static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1245					struct btrfs_root *root,
1246					u64 bytenr, int level, bool *is_shared)
1247{
1248	const struct btrfs_fs_info *fs_info = root->fs_info;
1249	struct btrfs_backref_shared_cache_entry *entry;
1250
1251	if (!current->journal_info)
1252		lockdep_assert_held(&fs_info->commit_root_sem);
1253
1254	if (!ctx->use_path_cache)
1255		return false;
1256
1257	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1258		return false;
1259
1260	/*
1261	 * Level -1 is used for the data extent, which is not reliable to cache
1262	 * because its reference count can increase or decrease without us
1263	 * realizing. We cache results only for extent buffers that lead from
1264	 * the root node down to the leaf with the file extent item.
1265	 */
1266	ASSERT(level >= 0);
1267
1268	entry = &ctx->path_cache_entries[level];
1269
1270	/* Unused cache entry or being used for some other extent buffer. */
1271	if (entry->bytenr != bytenr)
1272		return false;
1273
1274	/*
1275	 * We cached a false result, but the last snapshot generation of the
1276	 * root changed, so we now have a snapshot. Don't trust the result.
1277	 */
1278	if (!entry->is_shared &&
1279	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
1280		return false;
1281
1282	/*
1283	 * If we cached a true result and the last generation used for dropping
1284	 * a root changed, we can not trust the result, because the dropped root
1285	 * could be a snapshot sharing this extent buffer.
1286	 */
1287	if (entry->is_shared &&
1288	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
1289		return false;
1290
1291	*is_shared = entry->is_shared;
1292	/*
1293	 * If the node at this level is shared, then all nodes below are also
1294	 * shared. Currently some of the nodes below may be marked as not shared
1295	 * because we have just switched from one leaf to another, and switched
1296	 * also other nodes above the leaf and below the current level, so mark
1297	 * them as shared.
1298	 */
1299	if (*is_shared) {
1300		for (int i = 0; i < level; i++) {
1301			ctx->path_cache_entries[i].is_shared = true;
1302			ctx->path_cache_entries[i].gen = entry->gen;
1303		}
1304	}
1305
1306	return true;
1307}
1308
1309/*
1310 * The caller has joined a transaction or is holding a read lock on the
1311 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1312 * snapshot field changing while updating or checking the cache.
1313 */
1314static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1315				       struct btrfs_root *root,
1316				       u64 bytenr, int level, bool is_shared)
1317{
1318	const struct btrfs_fs_info *fs_info = root->fs_info;
1319	struct btrfs_backref_shared_cache_entry *entry;
1320	u64 gen;
1321
1322	if (!current->journal_info)
1323		lockdep_assert_held(&fs_info->commit_root_sem);
1324
1325	if (!ctx->use_path_cache)
1326		return;
1327
1328	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1329		return;
1330
1331	/*
1332	 * Level -1 is used for the data extent, which is not reliable to cache
1333	 * because its reference count can increase or decrease without us
1334	 * realizing. We cache results only for extent buffers that lead from
1335	 * the root node down to the leaf with the file extent item.
1336	 */
1337	ASSERT(level >= 0);
1338
1339	if (is_shared)
1340		gen = btrfs_get_last_root_drop_gen(fs_info);
1341	else
1342		gen = btrfs_root_last_snapshot(&root->root_item);
1343
1344	entry = &ctx->path_cache_entries[level];
1345	entry->bytenr = bytenr;
1346	entry->is_shared = is_shared;
1347	entry->gen = gen;
1348
1349	/*
1350	 * If we found an extent buffer is shared, set the cache result for all
1351	 * extent buffers below it to true. As nodes in the path are COWed,
1352	 * their sharedness is moved to their children, and if a leaf is COWed,
1353	 * then the sharedness of a data extent becomes direct: the refcount of
1354	 * the data extent is increased in the extent item in the extent tree.
1355	 */
1356	if (is_shared) {
1357		for (int i = 0; i < level; i++) {
1358			entry = &ctx->path_cache_entries[i];
1359			entry->is_shared = is_shared;
1360			entry->gen = gen;
1361		}
1362	}
1363}
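
/*
 * A minimal sketch of the validity rule shared by the two cache helpers
 * above: an entry is only trusted while the generation stamped into it
 * still matches the event that could invalidate it (a root drop for
 * "shared" results, a new snapshot of the root for "not shared" results).
 */
static inline bool sketch_cache_entry_valid(
		const struct btrfs_backref_shared_cache_entry *entry,
		u64 last_snapshot, u64 last_root_drop_gen)
{
	if (entry->is_shared)
		return entry->gen == last_root_drop_gen;
	return entry->gen == last_snapshot;
}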
1364
1365/*
1366 * This adds all existing backrefs (inline backrefs, keyed backrefs and
1367 * delayed refs) for the given bytenr to the refs list, merges duplicates and
1368 * resolves indirect refs to their parent bytenr.
1369 * When roots are found, they're added to the roots list.
1370 *
1371 * @ctx:     Backref walking context object, must not be NULL.
1372 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
1373 *           shared extent is detected.
1374 *
1375 * Otherwise this returns 0 for success and <0 for an error.
1376 *
1377 * FIXME some caching might speed things up
1378 */
1379static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
1380			     struct share_check *sc)
1381{
1382	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
1383	struct btrfs_key key;
1384	struct btrfs_path *path;
1385	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1386	struct btrfs_delayed_ref_head *head;
1387	int info_level = 0;
1388	int ret;
1389	struct prelim_ref *ref;
1390	struct rb_node *node;
1391	struct extent_inode_elem *eie = NULL;
1392	struct preftrees preftrees = {
1393		.direct = PREFTREE_INIT,
1394		.indirect = PREFTREE_INIT,
1395		.indirect_missing_keys = PREFTREE_INIT
1396	};
1397
1398	/* Roots ulist is not needed when using a sharedness check context. */
1399	if (sc)
1400		ASSERT(ctx->roots == NULL);
1401
1402	key.objectid = ctx->bytenr;
1403	key.offset = (u64)-1;
1404	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
1405		key.type = BTRFS_METADATA_ITEM_KEY;
1406	else
1407		key.type = BTRFS_EXTENT_ITEM_KEY;
1408
1409	path = btrfs_alloc_path();
1410	if (!path)
1411		return -ENOMEM;
1412	if (!ctx->trans) {
1413		path->search_commit_root = 1;
1414		path->skip_locking = 1;
1415	}
1416
1417	if (ctx->time_seq == BTRFS_SEQ_LAST)
1418		path->skip_locking = 1;
1419
1420again:
1421	head = NULL;
1422
1423	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1424	if (ret < 0)
1425		goto out;
1426	if (ret == 0) {
1427		/*
1428		 * Key with offset -1 found. There would have to exist an extent
1429		 * item with such an offset, but this is outside the valid range.
1430		 */
1431		ret = -EUCLEAN;
1432		goto out;
1433	}
1434
1435	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
1436	    ctx->time_seq != BTRFS_SEQ_LAST) {
1437		/*
1438		 * We have a specific time_seq we care about and trans, which
1439		 * means we have the path lock, so we need to grab the ref head
1440		 * and lock it to have a consistent view of the refs at the
1441		 * given time.
1442		 */
1443		delayed_refs = &ctx->trans->transaction->delayed_refs;
1444		spin_lock(&delayed_refs->lock);
1445		head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
1446						   ctx->bytenr);
1447		if (head) {
1448			if (!mutex_trylock(&head->mutex)) {
1449				refcount_inc(&head->refs);
1450				spin_unlock(&delayed_refs->lock);
1451
1452				btrfs_release_path(path);
1453
1454				/*
1455				 * Mutex was contended, block until it's
1456				 * released and try again
1457				 */
1458				mutex_lock(&head->mutex);
1459				mutex_unlock(&head->mutex);
1460				btrfs_put_delayed_ref_head(head);
1461				goto again;
1462			}
1463			spin_unlock(&delayed_refs->lock);
1464			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
1465					       &preftrees, sc);
1466			mutex_unlock(&head->mutex);
1467			if (ret)
1468				goto out;
1469		} else {
1470			spin_unlock(&delayed_refs->lock);
1471		}
1472	}
1473
1474	if (path->slots[0]) {
1475		struct extent_buffer *leaf;
1476		int slot;
1477
1478		path->slots[0]--;
1479		leaf = path->nodes[0];
1480		slot = path->slots[0];
1481		btrfs_item_key_to_cpu(leaf, &key, slot);
1482		if (key.objectid == ctx->bytenr &&
1483		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1484		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1485			ret = add_inline_refs(ctx, path, &info_level,
1486					      &preftrees, sc);
1487			if (ret)
1488				goto out;
1489			ret = add_keyed_refs(ctx, root, path, info_level,
1490					     &preftrees, sc);
1491			if (ret)
1492				goto out;
1493		}
1494	}
1495
1496	/*
1497	 * If we have a share context and we reached here, it means the extent
1498	 * is not directly shared (no multiple reference items for it),
1499	 * otherwise we would have exited earlier with a return value of
1500	 * BACKREF_FOUND_SHARED after processing delayed references or while
1501	 * processing inline or keyed references from the extent tree.
1502	 * The extent may however be indirectly shared through shared subtrees
1503	 * as a result from creating snapshots, so we determine below what is
1504	 * its parent node, in case we are dealing with a metadata extent, or
1505	 * what's the leaf (or leaves), from a fs tree, that has a file extent
1506	 * item pointing to it in case we are dealing with a data extent.
1507	 */
1508	ASSERT(extent_is_shared(sc) == 0);
1509
1510	/*
1511	 * If we are here for a data extent and we have a share_check structure
1512	 * it means the data extent is not directly shared (does not have
1513	 * multiple reference items), so we have to check if a path in the fs
1514	 * tree (going from the root node down to the leaf that has the file
1515	 * extent item pointing to the data extent) is shared, that is, if any
1516	 * of the extent buffers in the path is referenced by other trees.
1517	 */
1518	if (sc && ctx->bytenr == sc->data_bytenr) {
1519		/*
1520		 * If our data extent is from a generation more recent than the
1521		 * last generation used to snapshot the root, then we know that
1522		 * it can not be shared through subtrees, so we can skip
1523		 * resolving indirect references, there's no point in
1524		 * determining the extent buffers for the path from the fs tree
1525		 * root node down to the leaf that has the file extent item that
1526		 * points to the data extent.
1527		 */
1528		if (sc->data_extent_gen >
1529		    btrfs_root_last_snapshot(&sc->root->root_item)) {
1530			ret = BACKREF_FOUND_NOT_SHARED;
1531			goto out;
1532		}
1533
1534		/*
1535		 * If we are only determining if a data extent is shared or not
1536		 * and the corresponding file extent item is located in the same
1537		 * leaf as the previous file extent item, we can skip resolving
1538		 * indirect references for a data extent, since the fs tree path
1539		 * is the same (same leaf, so same path). We skip as long as the
1540		 * cached result for the leaf is valid and only if there's only
1541		 * one file extent item pointing to the data extent, because in
1542		 * the case of multiple file extent items, they may be located
1543		 * in different leaves and therefore we have multiple paths.
1544		 */
1545		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
1546		    sc->self_ref_count == 1) {
1547			bool cached;
1548			bool is_shared;
1549
1550			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
1551						     sc->ctx->curr_leaf_bytenr,
1552						     0, &is_shared);
1553			if (cached) {
1554				if (is_shared)
1555					ret = BACKREF_FOUND_SHARED;
1556				else
1557					ret = BACKREF_FOUND_NOT_SHARED;
1558				goto out;
1559			}
1560		}
1561	}
1562
1563	btrfs_release_path(path);
1564
1565	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
1566	if (ret)
1567		goto out;
1568
1569	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1570
1571	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
1572	if (ret)
1573		goto out;
1574
1575	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1576
1577	/*
1578	 * This walks the tree of merged and resolved refs. Tree blocks are
1579	 * read in as needed. Unique entries are added to the ulist, and
1580	 * the list of found roots is updated.
1581	 *
1582	 * We release the entire tree in one go before returning.
1583	 */
1584	node = rb_first_cached(&preftrees.direct.root);
1585	while (node) {
1586		ref = rb_entry(node, struct prelim_ref, rbnode);
1587		node = rb_next(&ref->rbnode);
1588		/*
1589		 * ref->count < 0 can happen here if there are delayed
1590		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1591		 * prelim_ref_insert() relies on this when merging
1592		 * identical refs to keep the overall count correct.
1593		 * prelim_ref_insert() will merge only those refs
1594		 * which compare identically.  Any refs having
1595		 * e.g. different offsets would not be merged,
1596		 * and would retain their original ref->count < 0.
1597		 */
1598		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
1599			/* no parent == root of tree */
1600			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
1601			if (ret < 0)
1602				goto out;
1603		}
1604		if (ref->count && ref->parent) {
1605			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
1606			    ref->level == 0) {
1607				struct btrfs_tree_parent_check check = { 0 };
1608				struct extent_buffer *eb;
1609
1610				check.level = ref->level;
1611
1612				eb = read_tree_block(ctx->fs_info, ref->parent,
1613						     &check);
1614				if (IS_ERR(eb)) {
1615					ret = PTR_ERR(eb);
1616					goto out;
1617				}
1618				if (!extent_buffer_uptodate(eb)) {
1619					free_extent_buffer(eb);
1620					ret = -EIO;
1621					goto out;
1622				}
1623
1624				if (!path->skip_locking)
1625					btrfs_tree_read_lock(eb);
1626				ret = find_extent_in_eb(ctx, eb, &eie);
1627				if (!path->skip_locking)
1628					btrfs_tree_read_unlock(eb);
1629				free_extent_buffer(eb);
1630				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1631				    ret < 0)
1632					goto out;
1633				ref->inode_list = eie;
1634				/*
1635				 * We transferred the list ownership to the ref,
1636				 * so set to NULL to avoid a double free in case
1637				 * an error happens after this.
1638				 */
1639				eie = NULL;
1640			}
1641			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
1642						  ref->inode_list,
1643						  (void **)&eie, GFP_NOFS);
1644			if (ret < 0)
1645				goto out;
1646			if (!ret && !ctx->skip_inode_ref_list) {
1647				/*
1648				 * We've recorded that parent, so we must extend
1649				 * its inode list here.
1650				 *
1651				 * However, if there was corruption we may not
1652				 * have found an eie; return an error in this
1653				 * case.
1654				 */
1655				ASSERT(eie);
1656				if (!eie) {
1657					ret = -EUCLEAN;
1658					goto out;
1659				}
1660				while (eie->next)
1661					eie = eie->next;
1662				eie->next = ref->inode_list;
1663			}
1664			eie = NULL;
1665			/*
1666			 * We have transferred the inode list ownership from
1667			 * this ref to the ref we added to the 'refs' ulist.
1668			 * So set this ref's inode list to NULL to avoid
1669			 * use-after-free when our caller uses it or double
1670			 * frees in case an error happens before we return.
1671			 */
1672			ref->inode_list = NULL;
1673		}
1674		cond_resched();
1675	}
1676
1677out:
1678	btrfs_free_path(path);
1679
1680	prelim_release(&preftrees.direct);
1681	prelim_release(&preftrees.indirect);
1682	prelim_release(&preftrees.indirect_missing_keys);
1683
1684	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
1685		free_inode_elem_list(eie);
1686	return ret;
1687}
1688
1689/*
1690 * Finds all leaves with a reference to the specified combination of
1691 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
1692 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1693 * function. The caller should free the ulist with free_leaf_list() if
1694 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
1695 * enough.
1696 *
1697 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1698 */
1699int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
1700{
1701	int ret;
1702
1703	ASSERT(ctx->refs == NULL);
1704
1705	ctx->refs = ulist_alloc(GFP_NOFS);
1706	if (!ctx->refs)
1707		return -ENOMEM;
1708
1709	ret = find_parent_nodes(ctx, NULL);
1710	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1711	    (ret < 0 && ret != -ENOENT)) {
1712		free_leaf_list(ctx->refs);
1713		ctx->refs = NULL;
1714		return ret;
1715	}
1716
1717	return 0;
1718}
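
/*
 * A minimal usage sketch for btrfs_find_all_leafs(), with a hypothetical
 * caller that has already filled in @walk_ctx (->fs_info, ->bytenr,
 * ->extent_item_pos): the refs ulist is allocated by the walk and must be
 * freed with free_leaf_list(), since inode lists may hang off its nodes.
 */
static int __maybe_unused sketch_walk_leaves(struct btrfs_backref_walk_ctx *walk_ctx)
{
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	ret = btrfs_find_all_leafs(walk_ctx);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx->refs, &uiter)))
		pr_debug("leaf at bytenr %llu\n", node->val);

	free_leaf_list(walk_ctx->refs);
	walk_ctx->refs = NULL;
	return 0;
}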
1719
1720/*
1721 * Walk all backrefs for a given extent to find all roots that reference this
1722 * extent. Walking a backref means finding all extents that reference this
1723 * extent and in turn walking the backrefs of those, too. Naturally this is a
1724 * recursive process, but here it is implemented in an iterative fashion: We
1725 * find all referencing extents for the extent in question and put them on a
1726 * list. In turn, we find all referencing extents for those, further appending
1727 * to the list. The way we iterate the list allows adding more elements after
1728 * the current while iterating. The process stops when we reach the end of the
1729 * list.
1730 *
1731 * Found roots are added to @ctx->roots, which is allocated by this function if
1732 * it points to NULL, in which case the caller is responsible for freeing it
1733 * after it's not needed anymore.
1734 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1735 * ulist to do temporary work, and frees it before returning.
1736 *
1737 * Returns 0 on success, < 0 on error.
1738 */
1739static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1740{
1741	const u64 orig_bytenr = ctx->bytenr;
1742	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
1743	bool roots_ulist_allocated = false;
1744	struct ulist_iterator uiter;
1745	int ret = 0;
1746
1747	ASSERT(ctx->refs == NULL);
1748
1749	ctx->refs = ulist_alloc(GFP_NOFS);
1750	if (!ctx->refs)
1751		return -ENOMEM;
1752
1753	if (!ctx->roots) {
1754		ctx->roots = ulist_alloc(GFP_NOFS);
1755		if (!ctx->roots) {
1756			ulist_free(ctx->refs);
1757			ctx->refs = NULL;
1758			return -ENOMEM;
1759		}
1760		roots_ulist_allocated = true;
1761	}
1762
1763	ctx->skip_inode_ref_list = true;
1764
1765	ULIST_ITER_INIT(&uiter);
1766	while (1) {
1767		struct ulist_node *node;
1768
1769		ret = find_parent_nodes(ctx, NULL);
1770		if (ret < 0 && ret != -ENOENT) {
1771			if (roots_ulist_allocated) {
1772				ulist_free(ctx->roots);
1773				ctx->roots = NULL;
1774			}
1775			break;
1776		}
1777		ret = 0;
1778		node = ulist_next(ctx->refs, &uiter);
1779		if (!node)
1780			break;
1781		ctx->bytenr = node->val;
1782		cond_resched();
1783	}
1784
1785	ulist_free(ctx->refs);
1786	ctx->refs = NULL;
1787	ctx->bytenr = orig_bytenr;
1788	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
1789
1790	return ret;
1791}
1792
1793int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1794			 bool skip_commit_root_sem)
1795{
1796	int ret;
1797
1798	if (!ctx->trans && !skip_commit_root_sem)
1799		down_read(&ctx->fs_info->commit_root_sem);
1800	ret = btrfs_find_all_roots_safe(ctx);
1801	if (!ctx->trans && !skip_commit_root_sem)
1802		up_read(&ctx->fs_info->commit_root_sem);
1803	return ret;
1804}
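
/*
 * Editor's illustrative sketch (not part of the original file): resolving
 * every root that can reach a given extent. Since @ctx->roots is left NULL
 * here, btrfs_find_all_roots_safe() allocates it and the caller owns it.
 */
static int example_print_roots(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = bytenr;

	/* No transaction attached, so let it take the commit_root_sem. */
	ret = btrfs_find_all_roots(&walk_ctx, false);
	if (ret)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx.roots, &uiter)))
		btrfs_debug(fs_info, "root %llu references extent %llu",
			    node->val, bytenr);

	ulist_free(walk_ctx.roots);
	return 0;
}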
1805
1806struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1807{
1808	struct btrfs_backref_share_check_ctx *ctx;
1809
1810	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1811	if (!ctx)
1812		return NULL;
1813
1814	ulist_init(&ctx->refs);
1815
1816	return ctx;
1817}
1818
1819void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1820{
1821	if (!ctx)
1822		return;
1823
1824	ulist_release(&ctx->refs);
1825	kfree(ctx);
1826}
1827
1828/*
1829 * Check if a data extent is shared or not.
1830 *
1831 * @inode:       The inode whose extent we are checking.
1832 * @bytenr:      Logical bytenr of the extent we are checking.
1833 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
1834 *               not known.
1835 * @ctx:         A backref sharedness check context.
1836 *
1837 * btrfs_is_data_extent_shared uses the backref walking code but will short
1838 * circuit as soon as it finds a root or inode that doesn't match the
1839 * one passed in. This provides a significant performance benefit for
1840 * callers (such as fiemap) which want to know whether the extent is
1841 * shared but do not need a ref count.
1842 *
1843 * This attempts to attach to the running transaction in order to account for
1844 * delayed refs, but continues on even when no running transaction exists.
1845 *
1846 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1847 */
1848int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1849				u64 extent_gen,
1850				struct btrfs_backref_share_check_ctx *ctx)
1851{
1852	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1853	struct btrfs_root *root = inode->root;
1854	struct btrfs_fs_info *fs_info = root->fs_info;
1855	struct btrfs_trans_handle *trans;
1856	struct ulist_iterator uiter;
1857	struct ulist_node *node;
1858	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1859	int ret = 0;
1860	struct share_check shared = {
1861		.ctx = ctx,
1862		.root = root,
1863		.inum = btrfs_ino(inode),
1864		.data_bytenr = bytenr,
1865		.data_extent_gen = extent_gen,
1866		.share_count = 0,
1867		.self_ref_count = 0,
1868		.have_delayed_delete_refs = false,
1869	};
1870	int level;
1871	bool leaf_cached;
1872	bool leaf_is_shared;
1873
1874	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1875		if (ctx->prev_extents_cache[i].bytenr == bytenr)
1876			return ctx->prev_extents_cache[i].is_shared;
1877	}
1878
1879	ulist_init(&ctx->refs);
1880
1881	trans = btrfs_join_transaction_nostart(root);
1882	if (IS_ERR(trans)) {
1883		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1884			ret = PTR_ERR(trans);
1885			goto out;
1886		}
1887		trans = NULL;
1888		down_read(&fs_info->commit_root_sem);
1889	} else {
1890		btrfs_get_tree_mod_seq(fs_info, &elem);
1891		walk_ctx.time_seq = elem.seq;
1892	}
1893
1894	ctx->use_path_cache = true;
1895
1896	/*
1897	 * We may have previously determined that the current leaf is shared.
1898	 * If it is, then we have a data extent that is shared due to a shared
1899	 * subtree (caused by snapshotting) and we don't need to check for data
1900	 * backrefs. If the leaf is not shared, then we must do backref walking
1901	 * to determine if the data extent is shared through reflinks.
1902	 */
1903	leaf_cached = lookup_backref_shared_cache(ctx, root,
1904						  ctx->curr_leaf_bytenr, 0,
1905						  &leaf_is_shared);
1906	if (leaf_cached && leaf_is_shared) {
1907		ret = 1;
1908		goto out_trans;
1909	}
1910
1911	walk_ctx.skip_inode_ref_list = true;
1912	walk_ctx.trans = trans;
1913	walk_ctx.fs_info = fs_info;
1914	walk_ctx.refs = &ctx->refs;
1915
1916	/* -1 means we are at the bytenr of the data extent. */
1917	level = -1;
1918	ULIST_ITER_INIT(&uiter);
1919	while (1) {
1920		const unsigned long prev_ref_count = ctx->refs.nnodes;
1921
1922		walk_ctx.bytenr = bytenr;
1923		ret = find_parent_nodes(&walk_ctx, &shared);
1924		if (ret == BACKREF_FOUND_SHARED ||
1925		    ret == BACKREF_FOUND_NOT_SHARED) {
1926			/* If shared must return 1, otherwise return 0. */
1927			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1928			if (level >= 0)
1929				store_backref_shared_cache(ctx, root, bytenr,
1930							   level, ret == 1);
1931			break;
1932		}
1933		if (ret < 0 && ret != -ENOENT)
1934			break;
1935		ret = 0;
1936
1937		/*
1938		 * More than one extent buffer (bytenr) may have been added to
1939		 * the ctx->refs ulist, in which case we have to check multiple
1940		 * tree paths in case the first one is not shared, so we can not
1941		 * use the path cache which is made for a single path. Multiple
1942		 * extent buffers at the current level happen when:
1943		 *
1944		 * 1) level -1, the data extent: If our data extent was not
1945		 *    directly shared (without multiple reference items), then
1946		 *    it might have a single reference item with a count > 1 for
1947		 *    the same offset, which means there are 2 (or more) file
1948		 *    extent items that point to the data extent - this happens
1949		 *    when a file extent item needs to be split and then one
1950		 *    item gets moved to another leaf due to a b+tree leaf split
1951		 *    when inserting some item. In this case the file extent
1952		 *    items may be located in different leaves and therefore
1953		 *    some of the leaves may be referenced through shared
1954		 *    subtrees while others are not. Since our extent buffer
1955		 *    cache only works for a single path (by far the most common
1956		 *    case and simpler to deal with), we can not use it if we
1957		 *    have multiple leaves (which implies multiple paths).
1958		 *
1959		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
1960		 *    and indirect references on a b+tree node/leaf, so we have
1961		 *    to check multiple paths, and the extent buffer (the
1962		 *    current bytenr) may be shared or not. One example is
1963		 *    during relocation as we may get a shared tree block ref
1964		 *    (direct ref) and a non-shared tree block ref (indirect
1965		 *    ref) for the same node/leaf.
1966		 */
1967		if ((ctx->refs.nnodes - prev_ref_count) > 1)
1968			ctx->use_path_cache = false;
1969
1970		if (level >= 0)
1971			store_backref_shared_cache(ctx, root, bytenr,
1972						   level, false);
1973		node = ulist_next(&ctx->refs, &uiter);
1974		if (!node)
1975			break;
1976		bytenr = node->val;
1977		if (ctx->use_path_cache) {
1978			bool is_shared;
1979			bool cached;
1980
1981			level++;
1982			cached = lookup_backref_shared_cache(ctx, root, bytenr,
1983							     level, &is_shared);
1984			if (cached) {
1985				ret = (is_shared ? 1 : 0);
1986				break;
1987			}
1988		}
1989		shared.share_count = 0;
1990		shared.have_delayed_delete_refs = false;
1991		cond_resched();
1992	}
1993
1994	/*
1995	 * If the path cache is disabled, then it means at some tree level we
1996	 * got multiple parents due to a mix of direct and indirect backrefs or
1997	 * multiple leaves with file extent items pointing to the same data
1998	 * extent. We have to invalidate the cache and cache only the sharedness
1999	 * result for the levels where we got only one node/reference.
2000	 */
2001	if (!ctx->use_path_cache) {
2002		int i = 0;
2003
2004		level--;
2005		if (ret >= 0 && level >= 0) {
2006			bytenr = ctx->path_cache_entries[level].bytenr;
2007			ctx->use_path_cache = true;
2008			store_backref_shared_cache(ctx, root, bytenr, level, ret);
2009			i = level + 1;
2010		}
2011
2012		for ( ; i < BTRFS_MAX_LEVEL; i++)
2013			ctx->path_cache_entries[i].bytenr = 0;
2014	}
2015
2016	/*
2017	 * Cache the sharedness result for the data extent if we know our inode
2018	 * has more than 1 file extent item that refers to the data extent.
2019	 */
2020	if (ret >= 0 && shared.self_ref_count > 1) {
2021		int slot = ctx->prev_extents_cache_slot;
2022
2023		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
2024		ctx->prev_extents_cache[slot].is_shared = (ret == 1);
2025
2026		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
2027		ctx->prev_extents_cache_slot = slot;
2028	}
2029
2030out_trans:
2031	if (trans) {
2032		btrfs_put_tree_mod_seq(fs_info, &elem);
2033		btrfs_end_transaction(trans);
2034	} else {
2035		up_read(&fs_info->commit_root_sem);
2036	}
2037out:
2038	ulist_release(&ctx->refs);
2039	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
2040
2041	return ret;
2042}
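
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * fiemap-style caller would use the sharedness check. The context caches
 * per-leaf and per-extent results, so it must be reused across all extents
 * of the same inode for the short-circuiting to pay off. Real callers also
 * keep ctx->curr_leaf_bytenr updated with the leaf currently being walked.
 */
static int example_check_shared(struct btrfs_inode *inode, u64 bytenr,
				u64 extent_gen)
{
	struct btrfs_backref_share_check_ctx *ctx;
	int ret;

	ctx = btrfs_alloc_backref_share_check_ctx();
	if (!ctx)
		return -ENOMEM;

	ret = btrfs_is_data_extent_shared(inode, bytenr, extent_gen, ctx);
	if (ret == 1)
		btrfs_debug(inode->root->fs_info, "extent %llu is shared",
			    bytenr);

	btrfs_free_backref_share_ctx(ctx);
	return ret < 0 ? ret : 0;
}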
2043
2044int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2045			  u64 start_off, struct btrfs_path *path,
2046			  struct btrfs_inode_extref **ret_extref,
2047			  u64 *found_off)
2048{
2049	int ret, slot;
2050	struct btrfs_key key;
2051	struct btrfs_key found_key;
2052	struct btrfs_inode_extref *extref;
2053	const struct extent_buffer *leaf;
2054	unsigned long ptr;
2055
2056	key.objectid = inode_objectid;
2057	key.type = BTRFS_INODE_EXTREF_KEY;
2058	key.offset = start_off;
2059
2060	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2061	if (ret < 0)
2062		return ret;
2063
2064	while (1) {
2065		leaf = path->nodes[0];
2066		slot = path->slots[0];
2067		if (slot >= btrfs_header_nritems(leaf)) {
2068			/*
2069			 * If the item at offset is not found,
2070			 * btrfs_search_slot will point us to the slot
2071			 * where it should be inserted. In our case
2072			 * that will be the slot directly before the
2073			 * next BTRFS_INODE_EXTREF_KEY item. In the case
2074			 * that we're pointing to the last slot in a
2075			 * leaf, we must move one leaf over.
2076			 */
2077			ret = btrfs_next_leaf(root, path);
2078			if (ret) {
2079				if (ret >= 1)
2080					ret = -ENOENT;
2081				break;
2082			}
2083			continue;
2084		}
2085
2086		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2087
2088		/*
2089		 * Check that we're still looking at an extended ref key for
2090		 * this particular objectid. If we have different
2091		 * objectid or type then there are no more to be found
2092		 * in the tree and we can exit.
2093		 */
2094		ret = -ENOENT;
2095		if (found_key.objectid != inode_objectid)
2096			break;
2097		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2098			break;
2099
2100		ret = 0;
2101		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2102		extref = (struct btrfs_inode_extref *)ptr;
2103		*ret_extref = extref;
2104		if (found_off)
2105			*found_off = found_key.offset;
2106		break;
2107	}
2108
2109	return ret;
2110}
2111
2112/*
2113 * This iterates to turn a name (from an iref/extref) into a full filesystem
2114 * path. Elements of the path are separated by '/' and the path is guaranteed
2115 * to be 0-terminated. The path is only given within the current file system,
2116 * therefore it never starts with a '/'. The caller is responsible for
2117 * providing "size" bytes in "dest". The dest buffer is filled backwards, and
2118 * finally the start point of the resulting string is returned. This pointer
2119 * is normally within dest.
2120 * In case the path buffer would overflow, the pointer is decremented further
2121 * as if output was written to the buffer, though no more output is actually
2122 * generated. That way, the caller can determine how much space would be
2123 * required for the path to fit into the buffer. In that case, the returned
2124 * value will be smaller than dest. Callers must check this!
2125 */
2126char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2127			u32 name_len, unsigned long name_off,
2128			struct extent_buffer *eb_in, u64 parent,
2129			char *dest, u32 size)
2130{
2131	int slot;
2132	u64 next_inum;
2133	int ret;
2134	s64 bytes_left = ((s64)size) - 1;
2135	struct extent_buffer *eb = eb_in;
2136	struct btrfs_key found_key;
2137	struct btrfs_inode_ref *iref;
2138
2139	if (bytes_left >= 0)
2140		dest[bytes_left] = '\0';
2141
2142	while (1) {
2143		bytes_left -= name_len;
2144		if (bytes_left >= 0)
2145			read_extent_buffer(eb, dest + bytes_left,
2146					   name_off, name_len);
2147		if (eb != eb_in) {
2148			if (!path->skip_locking)
2149				btrfs_tree_read_unlock(eb);
2150			free_extent_buffer(eb);
2151		}
2152		ret = btrfs_find_item(fs_root, path, parent, 0,
2153				BTRFS_INODE_REF_KEY, &found_key);
2154		if (ret > 0)
2155			ret = -ENOENT;
2156		if (ret)
2157			break;
2158
2159		next_inum = found_key.offset;
2160
2161		/* regular exit ahead */
2162		if (parent == next_inum)
2163			break;
2164
2165		slot = path->slots[0];
2166		eb = path->nodes[0];
2167		/* make sure we can use eb after releasing the path */
2168		if (eb != eb_in) {
2169			path->nodes[0] = NULL;
2170			path->locks[0] = 0;
2171		}
2172		btrfs_release_path(path);
2173		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2174
2175		name_len = btrfs_inode_ref_name_len(eb, iref);
2176		name_off = (unsigned long)(iref + 1);
2177
2178		parent = next_inum;
2179		--bytes_left;
2180		if (bytes_left >= 0)
2181			dest[bytes_left] = '/';
2182	}
2183
2184	btrfs_release_path(path);
2185
2186	if (ret)
2187		return ERR_PTR(ret);
2188
2189	return dest + bytes_left;
2190}
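
/*
 * Editor's illustrative sketch (not part of the original file): resolving a
 * single INODE_REF into a path string. @iref and @eb are assumed to come
 * from a search for a BTRFS_INODE_REF_KEY item; on a short buffer the
 * returned pointer lands below @dest, and dest + size - name is the size
 * that would have been needed.
 */
static int example_ref_to_path(struct btrfs_root *fs_root,
			       struct btrfs_path *path,
			       struct btrfs_inode_ref *iref,
			       struct extent_buffer *eb, u64 parent,
			       char *dest, u32 size)
{
	u32 name_len = btrfs_inode_ref_name_len(eb, iref);
	char *name;

	name = btrfs_ref_to_path(fs_root, path, name_len,
				 (unsigned long)(iref + 1), eb, parent,
				 dest, size);
	if (IS_ERR(name))
		return PTR_ERR(name);
	if (name < dest)
		return -ENAMETOOLONG;

	btrfs_debug(fs_root->fs_info, "resolved path: %s", name);
	return 0;
}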
2191
2192/*
2193 * this makes the path point to (logical EXTENT_ITEM *)
2194 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
2195 * tree blocks and <0 on error.
2196 */
2197int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2198			struct btrfs_path *path, struct btrfs_key *found_key,
2199			u64 *flags_ret)
2200{
2201	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2202	int ret;
2203	u64 flags;
2204	u64 size = 0;
2205	u32 item_size;
2206	const struct extent_buffer *eb;
2207	struct btrfs_extent_item *ei;
2208	struct btrfs_key key;
2209
2210	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2211		key.type = BTRFS_METADATA_ITEM_KEY;
2212	else
2213		key.type = BTRFS_EXTENT_ITEM_KEY;
2214	key.objectid = logical;
2215	key.offset = (u64)-1;
2216
2217	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2218	if (ret < 0)
2219		return ret;
2220	if (ret == 0) {
2221		/*
2222		 * Key with offset -1 found, there would have to exist an extent
2223		 * item with such offset, but this is out of the valid range.
2224		 */
2225		return -EUCLEAN;
2226	}
2227
2228	ret = btrfs_previous_extent_item(extent_root, path, 0);
2229	if (ret) {
2230		if (ret > 0)
2231			ret = -ENOENT;
2232		return ret;
2233	}
2234	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2235	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2236		size = fs_info->nodesize;
2237	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2238		size = found_key->offset;
2239
2240	if (found_key->objectid > logical ||
2241	    found_key->objectid + size <= logical) {
2242		btrfs_debug(fs_info,
2243			"logical %llu is not within any extent", logical);
2244		return -ENOENT;
2245	}
2246
2247	eb = path->nodes[0];
2248	item_size = btrfs_item_size(eb, path->slots[0]);
2249
2250	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2251	flags = btrfs_extent_flags(eb, ei);
2252
2253	btrfs_debug(fs_info,
2254		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2255		 logical, logical - found_key->objectid, found_key->objectid,
2256		 found_key->offset, flags, item_size);
2257
2258	WARN_ON(!flags_ret);
2259	if (flags_ret) {
2260		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2261			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2262		else if (flags & BTRFS_EXTENT_FLAG_DATA)
2263			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
2264		else
2265			BUG();
2266		return 0;
2267	}
2268
2269	return -EIO;
2270}
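
/*
 * Editor's illustrative sketch (not part of the original file): classifying
 * the extent that covers a logical address. On success the path points to
 * the EXTENT_ITEM/METADATA_ITEM and must be released by the caller.
 */
static int example_classify_logical(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	if (ret < 0)
		goto out;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		btrfs_debug(fs_info, "%llu is inside tree block %llu",
			    logical, found_key.objectid);
	else
		btrfs_debug(fs_info, "%llu is inside data extent %llu",
			    logical, found_key.objectid);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}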
2271
2272/*
2273 * Helper function to iterate extent inline refs. ptr must point to a 0 value
2274 * for the first call and may be modified; it is used to track state.
2275 * If more refs exist, 0 is returned and the next call to
2276 * get_extent_inline_ref must pass the modified ptr parameter to get the
2277 * next ref. After the last ref was processed, 1 is returned.
2278 * Returns <0 on error.
2279 */
2280static int get_extent_inline_ref(unsigned long *ptr,
2281				 const struct extent_buffer *eb,
2282				 const struct btrfs_key *key,
2283				 const struct btrfs_extent_item *ei,
2284				 u32 item_size,
2285				 struct btrfs_extent_inline_ref **out_eiref,
2286				 int *out_type)
2287{
2288	unsigned long end;
2289	u64 flags;
2290	struct btrfs_tree_block_info *info;
2291
2292	if (!*ptr) {
2293		/* first call */
2294		flags = btrfs_extent_flags(eb, ei);
2295		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2296			if (key->type == BTRFS_METADATA_ITEM_KEY) {
2297				/* a skinny metadata extent */
2298				*out_eiref =
2299				     (struct btrfs_extent_inline_ref *)(ei + 1);
2300			} else {
2301				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2302				info = (struct btrfs_tree_block_info *)(ei + 1);
2303				*out_eiref =
2304				   (struct btrfs_extent_inline_ref *)(info + 1);
2305			}
2306		} else {
2307			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2308		}
2309		*ptr = (unsigned long)*out_eiref;
2310		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2311			return -ENOENT;
2312	}
2313
2314	end = (unsigned long)ei + item_size;
2315	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2316	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2317						     BTRFS_REF_TYPE_ANY);
2318	if (*out_type == BTRFS_REF_TYPE_INVALID)
2319		return -EUCLEAN;
2320
2321	*ptr += btrfs_extent_inline_ref_size(*out_type);
2322	WARN_ON(*ptr > end);
2323	if (*ptr == end)
2324		return 1; /* last */
2325
2326	return 0;
2327}
2328
2329/*
2330 * Reads the tree block backref for an extent. Tree level and root are
2331 * returned through out_level and out_root. ptr must point to a 0 value for
2332 * the first call and may be modified (see the get_extent_inline_ref comment).
2333 * Returns 0 if data was provided, 1 if there was no more data to provide, or
2334 * <0 on error.
2335 */
2336int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2337			    struct btrfs_key *key, struct btrfs_extent_item *ei,
2338			    u32 item_size, u64 *out_root, u8 *out_level)
2339{
2340	int ret;
2341	int type;
2342	struct btrfs_extent_inline_ref *eiref;
2343
2344	if (*ptr == (unsigned long)-1)
2345		return 1;
2346
2347	while (1) {
2348		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2349					      &eiref, &type);
2350		if (ret < 0)
2351			return ret;
2352
2353		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2354		    type == BTRFS_SHARED_BLOCK_REF_KEY)
2355			break;
2356
2357		if (ret == 1)
2358			return 1;
2359	}
2360
2361	/* we can treat both ref types equally here */
2362	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2363
2364	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2365		struct btrfs_tree_block_info *info;
2366
2367		info = (struct btrfs_tree_block_info *)(ei + 1);
2368		*out_level = btrfs_tree_block_level(eb, info);
2369	} else {
2370		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2371		*out_level = (u8)key->offset;
2372	}
2373
2374	if (ret == 1)
2375		*ptr = (unsigned long)-1;
2376
2377	return 0;
2378}
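
/*
 * Editor's illustrative sketch (not part of the original file): walking all
 * tree block backrefs of a metadata extent. @eb, @key, @ei and @item_size
 * are assumed to come from a prior extent_from_logical() call; errors from
 * tree_backref_for_extent() simply end the loop here.
 */
static void example_walk_tree_backrefs(struct btrfs_fs_info *fs_info,
				       struct extent_buffer *eb,
				       struct btrfs_key *key,
				       struct btrfs_extent_item *ei,
				       u32 item_size)
{
	unsigned long ptr = 0;	/* Must start at 0, see the comment above. */
	u64 root;
	u8 level;

	while (tree_backref_for_extent(&ptr, eb, key, ei, item_size,
				       &root, &level) == 0)
		btrfs_debug(fs_info, "extent %llu: root %llu, level %d",
			    key->objectid, root, level);
}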
2379
2380static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2381			     struct extent_inode_elem *inode_list,
2382			     u64 root, u64 extent_item_objectid,
2383			     iterate_extent_inodes_t *iterate, void *ctx)
2384{
2385	struct extent_inode_elem *eie;
2386	int ret = 0;
2387
2388	for (eie = inode_list; eie; eie = eie->next) {
2389		btrfs_debug(fs_info,
2390			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2391			    extent_item_objectid, eie->inum,
2392			    eie->offset, root);
2393		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2394		if (ret) {
2395			btrfs_debug(fs_info,
2396				    "stopping iteration for %llu due to ret=%d",
2397				    extent_item_objectid, ret);
2398			break;
2399		}
2400	}
2401
2402	return ret;
2403}
2404
2405/*
2406 * Calls iterate() for every inode that references the extent identified by
2407 * the given parameters.
2408 * When the iterator function returns a non-zero value, iteration stops.
2409 */
2410int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2411			  bool search_commit_root,
2412			  iterate_extent_inodes_t *iterate, void *user_ctx)
2413{
2414	int ret;
2415	struct ulist *refs;
2416	struct ulist_node *ref_node;
2417	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2418	struct ulist_iterator ref_uiter;
2419
2420	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2421		    ctx->bytenr);
2422
2423	ASSERT(ctx->trans == NULL);
2424	ASSERT(ctx->roots == NULL);
2425
2426	if (!search_commit_root) {
2427		struct btrfs_trans_handle *trans;
2428
2429		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2430		if (IS_ERR(trans)) {
2431			if (PTR_ERR(trans) != -ENOENT &&
2432			    PTR_ERR(trans) != -EROFS)
2433				return PTR_ERR(trans);
2434			trans = NULL;
2435		}
2436		ctx->trans = trans;
2437	}
2438
2439	if (ctx->trans) {
2440		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2441		ctx->time_seq = seq_elem.seq;
2442	} else {
2443		down_read(&ctx->fs_info->commit_root_sem);
2444	}
2445
2446	ret = btrfs_find_all_leafs(ctx);
2447	if (ret)
2448		goto out;
2449	refs = ctx->refs;
2450	ctx->refs = NULL;
2451
2452	ULIST_ITER_INIT(&ref_uiter);
2453	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2454		const u64 leaf_bytenr = ref_node->val;
2455		struct ulist_node *root_node;
2456		struct ulist_iterator root_uiter;
2457		struct extent_inode_elem *inode_list;
2458
2459		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2460
2461		if (ctx->cache_lookup) {
2462			const u64 *root_ids;
2463			int root_count;
2464			bool cached;
2465
2466			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2467						   &root_ids, &root_count);
2468			if (cached) {
2469				for (int i = 0; i < root_count; i++) {
2470					ret = iterate_leaf_refs(ctx->fs_info,
2471								inode_list,
2472								root_ids[i],
2473								leaf_bytenr,
2474								iterate,
2475								user_ctx);
2476					if (ret)
2477						break;
2478				}
2479				continue;
2480			}
2481		}
2482
2483		if (!ctx->roots) {
2484			ctx->roots = ulist_alloc(GFP_NOFS);
2485			if (!ctx->roots) {
2486				ret = -ENOMEM;
2487				break;
2488			}
2489		}
2490
2491		ctx->bytenr = leaf_bytenr;
2492		ret = btrfs_find_all_roots_safe(ctx);
2493		if (ret)
2494			break;
2495
2496		if (ctx->cache_store)
2497			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2498
2499		ULIST_ITER_INIT(&root_uiter);
2500		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2501			btrfs_debug(ctx->fs_info,
2502				    "root %llu references leaf %llu, data list %#llx",
2503				    root_node->val, ref_node->val,
2504				    ref_node->aux);
2505			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2506						root_node->val, ctx->bytenr,
2507						iterate, user_ctx);
2508		}
2509		ulist_reinit(ctx->roots);
2510	}
2511
2512	free_leaf_list(refs);
2513out:
2514	if (ctx->trans) {
2515		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2516		btrfs_end_transaction(ctx->trans);
2517		ctx->trans = NULL;
2518	} else {
2519		up_read(&ctx->fs_info->commit_root_sem);
2520	}
2521
2522	ulist_free(ctx->roots);
2523	ctx->roots = NULL;
2524
2525	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2526		ret = 0;
2527
2528	return ret;
2529}
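
/*
 * Editor's illustrative sketch (not part of the original file): a custom
 * iterate_extent_inodes_t callback. Returning the non-zero value
 * BTRFS_ITERATE_EXTENT_INODES_STOP ends the walk early; since
 * iterate_extent_inodes() translates it back to 0, a caller that needs to
 * distinguish "stopped" from "exhausted" must record that in @ctx itself.
 */
static int example_count_refs(u64 inum, u64 offset, u64 num_bytes, u64 root,
			      void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	/* Stop after the first 16 inode references. */
	if (*count >= 16)
		return BTRFS_ITERATE_EXTENT_INODES_STOP;
	return 0;
}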
2530
2531static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2532{
2533	struct btrfs_data_container *inodes = ctx;
2534	const size_t c = 3 * sizeof(u64);
2535
2536	if (inodes->bytes_left >= c) {
2537		inodes->bytes_left -= c;
2538		inodes->val[inodes->elem_cnt] = inum;
2539		inodes->val[inodes->elem_cnt + 1] = offset;
2540		inodes->val[inodes->elem_cnt + 2] = root;
2541		inodes->elem_cnt += 3;
2542	} else {
2543		inodes->bytes_missing += c - inodes->bytes_left;
2544		inodes->bytes_left = 0;
2545		inodes->elem_missed += 3;
2546	}
2547
2548	return 0;
2549}
2550
2551int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2552				struct btrfs_path *path,
2553				void *ctx, bool ignore_offset)
2554{
2555	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2556	int ret;
2557	u64 flags = 0;
2558	struct btrfs_key found_key;
2559	int search_commit_root = path->search_commit_root;
2560
2561	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2562	btrfs_release_path(path);
2563	if (ret < 0)
2564		return ret;
2565	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2566		return -EINVAL;
2567
2568	walk_ctx.bytenr = found_key.objectid;
2569	if (ignore_offset)
2570		walk_ctx.ignore_extent_item_pos = true;
2571	else
2572		walk_ctx.extent_item_pos = logical - found_key.objectid;
2573	walk_ctx.fs_info = fs_info;
2574
2575	return iterate_extent_inodes(&walk_ctx, search_commit_root,
2576				     build_ino_list, ctx);
2577}
2578
2579static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2580			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2581
2582static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2583{
2584	int ret = 0;
2585	int slot;
2586	u32 cur;
2587	u32 len;
2588	u32 name_len;
2589	u64 parent = 0;
2590	int found = 0;
2591	struct btrfs_root *fs_root = ipath->fs_root;
2592	struct btrfs_path *path = ipath->btrfs_path;
2593	struct extent_buffer *eb;
2594	struct btrfs_inode_ref *iref;
2595	struct btrfs_key found_key;
2596
2597	while (!ret) {
2598		ret = btrfs_find_item(fs_root, path, inum,
2599				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2600				&found_key);
2601
2602		if (ret < 0)
2603			break;
2604		if (ret) {
2605			ret = found ? 0 : -ENOENT;
2606			break;
2607		}
2608		++found;
2609
2610		parent = found_key.offset;
2611		slot = path->slots[0];
2612		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2613		if (!eb) {
2614			ret = -ENOMEM;
2615			break;
2616		}
2617		btrfs_release_path(path);
2618
2619		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2620
2621		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2622			name_len = btrfs_inode_ref_name_len(eb, iref);
2623			/* path must be released before calling iterate()! */
2624			btrfs_debug(fs_root->fs_info,
2625				"following ref at offset %u for inode %llu in tree %llu",
2626				cur, found_key.objectid,
2627				btrfs_root_id(fs_root));
2628			ret = inode_to_path(parent, name_len,
2629				      (unsigned long)(iref + 1), eb, ipath);
2630			if (ret)
2631				break;
2632			len = sizeof(*iref) + name_len;
2633			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2634		}
2635		free_extent_buffer(eb);
2636	}
2637
2638	btrfs_release_path(path);
2639
2640	return ret;
2641}
2642
2643static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2644{
2645	int ret;
2646	int slot;
2647	u64 offset = 0;
2648	u64 parent;
2649	int found = 0;
2650	struct btrfs_root *fs_root = ipath->fs_root;
2651	struct btrfs_path *path = ipath->btrfs_path;
2652	struct extent_buffer *eb;
2653	struct btrfs_inode_extref *extref;
2654	u32 item_size;
2655	u32 cur_offset;
2656	unsigned long ptr;
2657
2658	while (1) {
2659		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2660					    &offset);
2661		if (ret < 0)
2662			break;
2663		if (ret) {
2664			ret = found ? 0 : -ENOENT;
2665			break;
2666		}
2667		++found;
2668
2669		slot = path->slots[0];
2670		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2671		if (!eb) {
2672			ret = -ENOMEM;
2673			break;
2674		}
2675		btrfs_release_path(path);
2676
2677		item_size = btrfs_item_size(eb, slot);
2678		ptr = btrfs_item_ptr_offset(eb, slot);
2679		cur_offset = 0;
2680
2681		while (cur_offset < item_size) {
2682			u32 name_len;
2683
2684			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2685			parent = btrfs_inode_extref_parent(eb, extref);
2686			name_len = btrfs_inode_extref_name_len(eb, extref);
2687			ret = inode_to_path(parent, name_len,
2688				      (unsigned long)&extref->name, eb, ipath);
2689			if (ret)
2690				break;
2691
2692			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2693			cur_offset += sizeof(*extref);
2694		}
2695		free_extent_buffer(eb);
2696
2697		offset++;
2698	}
2699
2700	btrfs_release_path(path);
2701
2702	return ret;
2703}
2704
2705/*
2706 * Returns 0 if the path could be dumped (possibly truncated).
2707 * Returns <0 in case of an error.
2708 */
2709static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2710			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2711{
2712	char *fspath;
2713	char *fspath_min;
2714	int i = ipath->fspath->elem_cnt;
2715	const int s_ptr = sizeof(char *);
2716	u32 bytes_left;
2717
2718	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2719					ipath->fspath->bytes_left - s_ptr : 0;
2720
2721	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2722	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2723				   name_off, eb, inum, fspath_min, bytes_left);
2724	if (IS_ERR(fspath))
2725		return PTR_ERR(fspath);
2726
2727	if (fspath > fspath_min) {
2728		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2729		++ipath->fspath->elem_cnt;
2730		ipath->fspath->bytes_left = fspath - fspath_min;
2731	} else {
2732		++ipath->fspath->elem_missed;
2733		ipath->fspath->bytes_missing += fspath_min - fspath;
2734		ipath->fspath->bytes_left = 0;
2735	}
2736
2737	return 0;
2738}
2739
2740/*
2741 * This dumps all file system paths to the inode into the ipath struct,
2742 * provided it has been created large enough. Each path is zero-terminated
2743 * and accessed from ipath->fspath->val[i].
2744 * When it returns, there are ipath->fspath->elem_cnt paths available in
2745 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2746 * number of missed paths is recorded in ipath->fspath->elem_missed;
2747 * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
2748 * bytes that would have been needed to return all paths.
2749 */
2750int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2751{
2752	int ret;
2753	int found_refs = 0;
2754
2755	ret = iterate_inode_refs(inum, ipath);
2756	if (!ret)
2757		++found_refs;
2758	else if (ret != -ENOENT)
2759		return ret;
2760
2761	ret = iterate_inode_extrefs(inum, ipath);
2762	if (ret == -ENOENT && found_refs)
2763		return 0;
2764
2765	return ret;
2766}
2767
2768struct btrfs_data_container *init_data_container(u32 total_bytes)
2769{
2770	struct btrfs_data_container *data;
2771	size_t alloc_bytes;
2772
2773	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2774	data = kvzalloc(alloc_bytes, GFP_KERNEL);
2775	if (!data)
2776		return ERR_PTR(-ENOMEM);
2777
2778	if (total_bytes >= sizeof(*data))
2779		data->bytes_left = total_bytes - sizeof(*data);
2780	else
2781		data->bytes_missing = sizeof(*data) - total_bytes;
2782
2783	return data;
2784}
2785
2786/*
2787 * Allocates space to return multiple file system paths for an inode.
2788 * total_bytes to allocate is passed; note that the space usable for actual
2789 * path information will be total_bytes - sizeof(struct inode_fs_paths).
2790 * The returned pointer must be freed with free_ipath() in the end.
2791 */
2792struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2793					struct btrfs_path *path)
2794{
2795	struct inode_fs_paths *ifp;
2796	struct btrfs_data_container *fspath;
2797
2798	fspath = init_data_container(total_bytes);
2799	if (IS_ERR(fspath))
2800		return ERR_CAST(fspath);
2801
2802	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2803	if (!ifp) {
2804		kvfree(fspath);
2805		return ERR_PTR(-ENOMEM);
2806	}
2807
2808	ifp->btrfs_path = path;
2809	ifp->fspath = fspath;
2810	ifp->fs_root = fs_root;
2811
2812	return ifp;
2813}
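
/*
 * Editor's illustrative sketch (not part of the original file): the full
 * ipath round trip, similar to what the BTRFS_IOC_INO_PATHS ioctl does.
 * Each ipath->fspath->val[i] holds a pointer (stored as u64) to a
 * zero-terminated path inside the container.
 */
static int example_paths_of_inode(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		goto out_path;
	}

	ret = paths_from_inode(inum, ipath);
	if (ret)
		goto out_ipath;

	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		btrfs_debug(fs_root->fs_info, "inode %llu path: %s", inum,
			    (char *)(unsigned long)ipath->fspath->val[i]);
out_ipath:
	free_ipath(ipath);
out_path:
	btrfs_free_path(path);
	return ret;
}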
2814
2815void free_ipath(struct inode_fs_paths *ipath)
2816{
2817	if (!ipath)
2818		return;
2819	kvfree(ipath->fspath);
2820	kfree(ipath);
2821}
2822
2823struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2824{
2825	struct btrfs_backref_iter *ret;
2826
2827	ret = kzalloc(sizeof(*ret), GFP_NOFS);
2828	if (!ret)
2829		return NULL;
2830
2831	ret->path = btrfs_alloc_path();
2832	if (!ret->path) {
2833		kfree(ret);
2834		return NULL;
2835	}
2836
2837	/* Current backref iterator only supports iteration in commit root */
2838	ret->path->search_commit_root = 1;
2839	ret->path->skip_locking = 1;
2840	ret->fs_info = fs_info;
2841
2842	return ret;
2843}
2844
2845static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
2846{
2847	iter->bytenr = 0;
2848	iter->item_ptr = 0;
2849	iter->cur_ptr = 0;
2850	iter->end_ptr = 0;
2851	btrfs_release_path(iter->path);
2852	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
2853}
2854
2855int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2856{
2857	struct btrfs_fs_info *fs_info = iter->fs_info;
2858	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2859	struct btrfs_path *path = iter->path;
2860	struct btrfs_extent_item *ei;
2861	struct btrfs_key key;
2862	int ret;
2863
2864	key.objectid = bytenr;
2865	key.type = BTRFS_METADATA_ITEM_KEY;
2866	key.offset = (u64)-1;
2867	iter->bytenr = bytenr;
2868
2869	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2870	if (ret < 0)
2871		return ret;
2872	if (ret == 0) {
2873		/*
2874		 * Key with offset -1 found, there would have to exist an extent
2875		 * item with such offset, but this is out of the valid range.
2876		 */
2877		ret = -EUCLEAN;
2878		goto release;
2879	}
2880	if (path->slots[0] == 0) {
2881		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2882		ret = -EUCLEAN;
2883		goto release;
2884	}
2885	path->slots[0]--;
2886
2887	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2888	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2889	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2890		ret = -ENOENT;
2891		goto release;
2892	}
2893	memcpy(&iter->cur_key, &key, sizeof(key));
2894	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2895						    path->slots[0]);
2896	iter->end_ptr = (u32)(iter->item_ptr +
2897			btrfs_item_size(path->nodes[0], path->slots[0]));
2898	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2899			    struct btrfs_extent_item);
2900
2901	/*
2902	 * Only iteration over tree backrefs is supported for now.
2903	 *
2904	 * This is an extra precaution for non skinny-metadata, where
2905	 * EXTENT_ITEM is also used for tree blocks, so we can only use the
2906	 * extent flags to determine if it's a tree block.
2907	 */
2908	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2909		ret = -ENOTSUPP;
2910		goto release;
2911	}
2912	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2913
2914	/* If there is no inline backref, go search for keyed backref */
2915	if (iter->cur_ptr >= iter->end_ptr) {
2916		ret = btrfs_next_item(extent_root, path);
2917
2918		/* No inline nor keyed ref */
2919		if (ret > 0) {
2920			ret = -ENOENT;
2921			goto release;
2922		}
2923		if (ret < 0)
2924			goto release;
2925
2926		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2927				path->slots[0]);
2928		if (iter->cur_key.objectid != bytenr ||
2929		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2930		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2931			ret = -ENOENT;
2932			goto release;
2933		}
2934		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2935							   path->slots[0]);
2936		iter->item_ptr = iter->cur_ptr;
2937		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2938				      path->nodes[0], path->slots[0]));
2939	}
2940
2941	return 0;
2942release:
2943	btrfs_backref_iter_release(iter);
2944	return ret;
2945}
2946
2947static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
2948{
2949	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
2950	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
2951		return true;
2952	return false;
2953}
2954
2955/*
2956 * Go to the next backref item of current bytenr, can be either inlined or
2957 * keyed.
2958 *
2959 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2960 *
2961 * Return 0 if we get the next backref without problem.
2962 * Return >0 if there is no extra backref for this bytenr.
2963 * Return <0 if something went wrong.
2964 */
2965int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2966{
2967	struct extent_buffer *eb = iter->path->nodes[0];
2968	struct btrfs_root *extent_root;
2969	struct btrfs_path *path = iter->path;
2970	struct btrfs_extent_inline_ref *iref;
2971	int ret;
2972	u32 size;
2973
2974	if (btrfs_backref_iter_is_inline_ref(iter)) {
2975		/* We're still inside the inline refs */
2976		ASSERT(iter->cur_ptr < iter->end_ptr);
2977
2978		if (btrfs_backref_has_tree_block_info(iter)) {
2979			/* First tree block info */
2980			size = sizeof(struct btrfs_tree_block_info);
2981		} else {
2982			/* Use inline ref type to determine the size */
2983			int type;
2984
2985			iref = (struct btrfs_extent_inline_ref *)
2986				((unsigned long)iter->cur_ptr);
2987			type = btrfs_extent_inline_ref_type(eb, iref);
2988
2989			size = btrfs_extent_inline_ref_size(type);
2990		}
2991		iter->cur_ptr += size;
2992		if (iter->cur_ptr < iter->end_ptr)
2993			return 0;
2994
2995		/* All inline items iterated, fall through */
2996	}
2997
2998	/* We're at keyed items, there is no inline item, go to the next one */
2999	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
3000	ret = btrfs_next_item(extent_root, iter->path);
3001	if (ret)
3002		return ret;
3003
3004	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
3005	if (iter->cur_key.objectid != iter->bytenr ||
3006	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3007	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
3008		return 1;
3009	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
3010					path->slots[0]);
3011	iter->cur_ptr = iter->item_ptr;
3012	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
3013						path->slots[0]);
3014	return 0;
3015}
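
/*
 * Editor's illustrative sketch (not part of the original file): driving the
 * iterator over all backrefs of a tree block. It assumes the small
 * btrfs_backref_iter_free() helper from backref.h for the final cleanup.
 * Note that for inline refs iter->cur_key still holds the extent item key;
 * the actual ref type has to be decoded from iter->cur_ptr, as
 * btrfs_backref_add_tree_node() below demonstrates.
 */
static int example_iter_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	do {
		btrfs_debug(fs_info, "bytenr %llu: key (%llu %u %llu)",
			    bytenr, iter->cur_key.objectid,
			    iter->cur_key.type, iter->cur_key.offset);
		ret = btrfs_backref_iter_next(iter);
	} while (ret == 0);

	/* ret > 0 just means the backrefs are exhausted. */
	if (ret > 0)
		ret = 0;
out:
	btrfs_backref_iter_free(iter);
	return ret;
}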
3016
3017void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
3018			      struct btrfs_backref_cache *cache, bool is_reloc)
3019{
3020	int i;
3021
3022	cache->rb_root = RB_ROOT;
3023	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3024		INIT_LIST_HEAD(&cache->pending[i]);
3025	INIT_LIST_HEAD(&cache->changed);
3026	INIT_LIST_HEAD(&cache->detached);
3027	INIT_LIST_HEAD(&cache->leaves);
3028	INIT_LIST_HEAD(&cache->pending_edge);
3029	INIT_LIST_HEAD(&cache->useless_node);
3030	cache->fs_info = fs_info;
3031	cache->is_reloc = is_reloc;
3032}
3033
3034struct btrfs_backref_node *btrfs_backref_alloc_node(
3035		struct btrfs_backref_cache *cache, u64 bytenr, int level)
3036{
3037	struct btrfs_backref_node *node;
3038
3039	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
3040	node = kzalloc(sizeof(*node), GFP_NOFS);
3041	if (!node)
3042		return node;
3043
3044	INIT_LIST_HEAD(&node->list);
3045	INIT_LIST_HEAD(&node->upper);
3046	INIT_LIST_HEAD(&node->lower);
3047	RB_CLEAR_NODE(&node->rb_node);
3048	cache->nr_nodes++;
3049	node->level = level;
3050	node->bytenr = bytenr;
3051
3052	return node;
3053}
3054
3055void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
3056			     struct btrfs_backref_node *node)
3057{
3058	if (node) {
3059		ASSERT(list_empty(&node->list));
3060		ASSERT(list_empty(&node->lower));
3061		ASSERT(node->eb == NULL);
3062		cache->nr_nodes--;
3063		btrfs_put_root(node->root);
3064		kfree(node);
3065	}
3066}
3067
3068struct btrfs_backref_edge *btrfs_backref_alloc_edge(
3069		struct btrfs_backref_cache *cache)
3070{
3071	struct btrfs_backref_edge *edge;
3072
3073	edge = kzalloc(sizeof(*edge), GFP_NOFS);
3074	if (edge)
3075		cache->nr_edges++;
3076	return edge;
3077}
3078
3079void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
3080			     struct btrfs_backref_edge *edge)
3081{
3082	if (edge) {
3083		cache->nr_edges--;
3084		kfree(edge);
3085	}
3086}
3087
3088void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
3089{
3090	if (node->locked) {
3091		btrfs_tree_unlock(node->eb);
3092		node->locked = 0;
3093	}
3094}
3095
3096void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
3097{
3098	if (node->eb) {
3099		btrfs_backref_unlock_node_buffer(node);
3100		free_extent_buffer(node->eb);
3101		node->eb = NULL;
3102	}
3103}
3104
3105/*
3106 * Drop the backref node from cache without cleaning up its child
3107 * edges.
3108 *
3109 * This can only be called on a node without parent edges.
3110 * The child edges are still kept as is.
3111 */
3112void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
3113			     struct btrfs_backref_node *node)
3114{
3115	ASSERT(list_empty(&node->upper));
3116
3117	btrfs_backref_drop_node_buffer(node);
3118	list_del_init(&node->list);
3119	list_del_init(&node->lower);
3120	if (!RB_EMPTY_NODE(&node->rb_node))
3121		rb_erase(&node->rb_node, &tree->rb_root);
3122	btrfs_backref_free_node(tree, node);
3123}
3124
3125/*
3126 * Drop the backref node from cache, also cleaning up all its
3127 * upper edges and any uncached nodes in the path.
3128 *
3129 * This cleanup happens bottom up, thus the node should either
3130 * be the lowest node in the cache or a detached node.
3131 */
3132void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3133				struct btrfs_backref_node *node)
3134{
3135	struct btrfs_backref_node *upper;
3136	struct btrfs_backref_edge *edge;
3137
3138	if (!node)
3139		return;
3140
3141	BUG_ON(!node->lowest && !node->detached);
3142	while (!list_empty(&node->upper)) {
3143		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
3144				  list[LOWER]);
3145		upper = edge->node[UPPER];
3146		list_del(&edge->list[LOWER]);
3147		list_del(&edge->list[UPPER]);
3148		btrfs_backref_free_edge(cache, edge);
3149
3150		/*
3151		 * Add the node to the leaf node list if no other child block
3152		 * is cached.
3153		 */
3154		if (list_empty(&upper->lower)) {
3155			list_add_tail(&upper->lower, &cache->leaves);
3156			upper->lowest = 1;
3157		}
3158	}
3159
3160	btrfs_backref_drop_node(cache, node);
3161}
3162
3163/*
3164 * Release all nodes/edges from current cache
3165 */
3166void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3167{
3168	struct btrfs_backref_node *node;
3169	int i;
3170
3171	while (!list_empty(&cache->detached)) {
3172		node = list_entry(cache->detached.next,
3173				  struct btrfs_backref_node, list);
3174		btrfs_backref_cleanup_node(cache, node);
3175	}
3176
3177	while (!list_empty(&cache->leaves)) {
3178		node = list_entry(cache->leaves.next,
3179				  struct btrfs_backref_node, lower);
3180		btrfs_backref_cleanup_node(cache, node);
3181	}
3182
3183	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3184		while (!list_empty(&cache->pending[i])) {
3185			node = list_first_entry(&cache->pending[i],
3186						struct btrfs_backref_node,
3187						list);
3188			btrfs_backref_cleanup_node(cache, node);
3189		}
3190	}
3191	ASSERT(list_empty(&cache->pending_edge));
3192	ASSERT(list_empty(&cache->useless_node));
3193	ASSERT(list_empty(&cache->changed));
3194	ASSERT(list_empty(&cache->detached));
3195	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3196	ASSERT(!cache->nr_nodes);
3197	ASSERT(!cache->nr_edges);
3198}
3199
3200void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
3201			     struct btrfs_backref_node *lower,
3202			     struct btrfs_backref_node *upper,
3203			     int link_which)
3204{
3205	ASSERT(upper && lower && upper->level == lower->level + 1);
3206	edge->node[LOWER] = lower;
3207	edge->node[UPPER] = upper;
3208	if (link_which & LINK_LOWER)
3209		list_add_tail(&edge->list[LOWER], &lower->upper);
3210	if (link_which & LINK_UPPER)
3211		list_add_tail(&edge->list[UPPER], &upper->lower);
3212}
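
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * allocate-and-link sequence for a new edge between two already-cached
 * nodes on adjacent levels. Passing LINK_LOWER alone is what
 * handle_direct_tree_backref() below does when the UPPER list head is
 * queued on cache->pending_edge instead.
 */
static int example_link_nodes(struct btrfs_backref_cache *cache,
			      struct btrfs_backref_node *lower,
			      struct btrfs_backref_node *upper)
{
	struct btrfs_backref_edge *edge;

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER | LINK_UPPER);
	return 0;
}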
3213/*
3214 * Handle direct tree backref
3215 *
3216 * Direct tree backref means the backref item shows its parent bytenr
3217 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
3218 *
3219 * @ref_key:	The converted backref key.
3220 *		For keyed backref, it's the item key.
3221 *		For inlined backref, objectid is the bytenr,
3222 *		type is btrfs_inline_ref_type, offset is
3223 *		btrfs_inline_ref_offset.
3224 */
3225static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3226				      struct btrfs_key *ref_key,
3227				      struct btrfs_backref_node *cur)
3228{
3229	struct btrfs_backref_edge *edge;
3230	struct btrfs_backref_node *upper;
3231	struct rb_node *rb_node;
3232
3233	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3234
3235	/* Only reloc root uses backref pointing to itself */
3236	if (ref_key->objectid == ref_key->offset) {
3237		struct btrfs_root *root;
3238
3239		cur->is_reloc_root = 1;
3240		/* Only reloc backref cache cares about a specific root */
3241		if (cache->is_reloc) {
3242			root = find_reloc_root(cache->fs_info, cur->bytenr);
3243			if (!root)
3244				return -ENOENT;
3245			cur->root = root;
3246		} else {
3247			/*
3248			 * For generic purpose backref cache, reloc root node
3249			 * is useless.
3250			 */
3251			list_add(&cur->list, &cache->useless_node);
3252		}
3253		return 0;
3254	}
3255
3256	edge = btrfs_backref_alloc_edge(cache);
3257	if (!edge)
3258		return -ENOMEM;
3259
3260	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3261	if (!rb_node) {
3262		/* Parent node not yet cached */
3263		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3264					   cur->level + 1);
3265		if (!upper) {
3266			btrfs_backref_free_edge(cache, edge);
3267			return -ENOMEM;
3268		}
3269
3270		/*
3271		 * Backrefs for the upper level block aren't cached, add the
3272		 * block to the pending list
3273		 */
3274		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3275	} else {
3276		/* Parent node already cached */
3277		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3278		ASSERT(upper->checked);
3279		INIT_LIST_HEAD(&edge->list[UPPER]);
3280	}
3281	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3282	return 0;
3283}
3284
3285/*
3286 * Handle indirect tree backref
3287 *
3288 * Indirect tree backref means we only know which tree the node belongs to.
3289 * We still need to do a tree search to find out the parents. This is for
3290 * TREE_BLOCK_REF backref (keyed or inlined).
3291 *
3292 * @trans:	Transaction handle.
3293 * @ref_key:	The same as @ref_key in handle_direct_tree_backref().
3294 * @tree_key:	The first key of this tree block.
3295 * @path:	A clean (released) path, to avoid allocating a path every
3296 *		time the function gets called.
3297 */
3298static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
3299					struct btrfs_backref_cache *cache,
3300					struct btrfs_path *path,
3301					struct btrfs_key *ref_key,
3302					struct btrfs_key *tree_key,
3303					struct btrfs_backref_node *cur)
3304{
3305	struct btrfs_fs_info *fs_info = cache->fs_info;
3306	struct btrfs_backref_node *upper;
3307	struct btrfs_backref_node *lower;
3308	struct btrfs_backref_edge *edge;
3309	struct extent_buffer *eb;
3310	struct btrfs_root *root;
3311	struct rb_node *rb_node;
3312	int level;
3313	bool need_check = true;
3314	int ret;
3315
3316	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3317	if (IS_ERR(root))
3318		return PTR_ERR(root);
3319	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3320		cur->cowonly = 1;
3321
3322	if (btrfs_root_level(&root->root_item) == cur->level) {
3323		/* Tree root */
3324		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3325		/*
3326		 * For reloc backref cache, we may ignore reloc root.  But for
3327		 * general purpose backref cache, we can't rely on
3328		 * btrfs_should_ignore_reloc_root() as it may conflict with
3329		 * current running relocation and lead to missing root.
3330		 *
3331		 * For general purpose backref cache, reloc root detection is
3332		 * completely relying on direct backref (key->offset is parent
3333		 * bytenr), thus only do such check for reloc cache.
3334		 */
3335		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3336			btrfs_put_root(root);
3337			list_add(&cur->list, &cache->useless_node);
3338		} else {
3339			cur->root = root;
3340		}
3341		return 0;
3342	}
3343
3344	level = cur->level + 1;
3345
3346	/* Search the tree to find parent blocks referring to the block */
3347	path->search_commit_root = 1;
3348	path->skip_locking = 1;
3349	path->lowest_level = level;
3350	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3351	path->lowest_level = 0;
3352	if (ret < 0) {
3353		btrfs_put_root(root);
3354		return ret;
3355	}
3356	if (ret > 0 && path->slots[level] > 0)
3357		path->slots[level]--;
3358
3359	eb = path->nodes[level];
3360	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3361		btrfs_err(fs_info,
3362"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3363			  cur->bytenr, level - 1, btrfs_root_id(root),
3364			  tree_key->objectid, tree_key->type, tree_key->offset);
3365		btrfs_put_root(root);
3366		ret = -ENOENT;
3367		goto out;
3368	}
3369	lower = cur;
3370
3371	/* Add all nodes and edges in the path */
3372	for (; level < BTRFS_MAX_LEVEL; level++) {
3373		if (!path->nodes[level]) {
3374			ASSERT(btrfs_root_bytenr(&root->root_item) ==
3375			       lower->bytenr);
3376			/* Same as previous should_ignore_reloc_root() call */
3377			if (btrfs_should_ignore_reloc_root(root) &&
3378			    cache->is_reloc) {
3379				btrfs_put_root(root);
3380				list_add(&lower->list, &cache->useless_node);
3381			} else {
3382				lower->root = root;
3383			}
3384			break;
3385		}
3386
3387		edge = btrfs_backref_alloc_edge(cache);
3388		if (!edge) {
3389			btrfs_put_root(root);
3390			ret = -ENOMEM;
3391			goto out;
3392		}
3393
3394		eb = path->nodes[level];
3395		rb_node = rb_simple_search(&cache->rb_root, eb->start);
3396		if (!rb_node) {
3397			upper = btrfs_backref_alloc_node(cache, eb->start,
3398							 lower->level + 1);
3399			if (!upper) {
3400				btrfs_put_root(root);
3401				btrfs_backref_free_edge(cache, edge);
3402				ret = -ENOMEM;
3403				goto out;
3404			}
3405			upper->owner = btrfs_header_owner(eb);
3406			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3407				upper->cowonly = 1;
3408
3409			/*
3410			 * If we know the block isn't shared we can avoid
3411			 * checking its backrefs.
3412			 */
3413			if (btrfs_block_can_be_shared(trans, root, eb))
3414				upper->checked = 0;
3415			else
3416				upper->checked = 1;
3417
3418			/*
3419			 * Add the block to the pending list if we need to check
3420			 * its backrefs; we only do this once while walking up a
3421			 * tree, as we will catch anything else later on.
3422			 */
3423			if (!upper->checked && need_check) {
3424				need_check = false;
3425				list_add_tail(&edge->list[UPPER],
3426					      &cache->pending_edge);
3427			} else {
3428				if (upper->checked)
3429					need_check = true;
3430				INIT_LIST_HEAD(&edge->list[UPPER]);
3431			}
3432		} else {
3433			upper = rb_entry(rb_node, struct btrfs_backref_node,
3434					 rb_node);
3435			ASSERT(upper->checked);
3436			INIT_LIST_HEAD(&edge->list[UPPER]);
3437			if (!upper->owner)
3438				upper->owner = btrfs_header_owner(eb);
3439		}
3440		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3441
3442		if (rb_node) {
3443			btrfs_put_root(root);
3444			break;
3445		}
3446		lower = upper;
3447		upper = NULL;
3448	}
3449out:
3450	btrfs_release_path(path);
3451	return ret;
3452}
3453
3454/*
3455 * Add backref node @cur into @cache.
3456 *
3457 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
3458 *	 links aren't yet bi-directional.
3459 *	 Use btrfs_backref_finish_upper_links() to finish the linkage.
3460 *
3461 * @trans:	Transaction handle.
3462 * @path:	Released path for indirect tree backref lookup
3463 * @iter:	Released backref iter for extent tree search
3464 * @node_key:	The first key of the tree block
3465 */
3466int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
3467				struct btrfs_backref_cache *cache,
3468				struct btrfs_path *path,
3469				struct btrfs_backref_iter *iter,
3470				struct btrfs_key *node_key,
3471				struct btrfs_backref_node *cur)
3472{
3473	struct btrfs_backref_edge *edge;
3474	struct btrfs_backref_node *exist;
3475	int ret;
3476
3477	ret = btrfs_backref_iter_start(iter, cur->bytenr);
3478	if (ret < 0)
3479		return ret;
3480	/*
3481	 * We skip the first btrfs_tree_block_info, as we don't use the key
3482	 * stored in it, but fetch it from the tree block
3483	 */
3484	if (btrfs_backref_has_tree_block_info(iter)) {
3485		ret = btrfs_backref_iter_next(iter);
3486		if (ret < 0)
3487			goto out;
3488		/* No extra backref? This means the tree block is corrupted */
3489		if (ret > 0) {
3490			ret = -EUCLEAN;
3491			goto out;
3492		}
3493	}
3494	WARN_ON(cur->checked);
3495	if (!list_empty(&cur->upper)) {
3496		/*
3497		 * The backref was added previously when processing backref of
3498		 * type BTRFS_TREE_BLOCK_REF_KEY
3499		 */
3500		ASSERT(list_is_singular(&cur->upper));
3501		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3502				  list[LOWER]);
3503		ASSERT(list_empty(&edge->list[UPPER]));
3504		exist = edge->node[UPPER];
3505		/*
3506		 * Add the upper level block to the pending list if we need to
3507		 * check its backrefs
3508		 */
3509		if (!exist->checked)
3510			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3511	} else {
3512		exist = NULL;
3513	}
3514
3515	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3516		struct extent_buffer *eb;
3517		struct btrfs_key key;
3518		int type;
3519
3520		cond_resched();
3521		eb = iter->path->nodes[0];
3522
3523		key.objectid = iter->bytenr;
3524		if (btrfs_backref_iter_is_inline_ref(iter)) {
3525			struct btrfs_extent_inline_ref *iref;
3526
3527			/* Update key for inline backref */
3528			iref = (struct btrfs_extent_inline_ref *)
3529				((unsigned long)iter->cur_ptr);
3530			type = btrfs_get_extent_inline_ref_type(eb, iref,
3531							BTRFS_REF_TYPE_BLOCK);
3532			if (type == BTRFS_REF_TYPE_INVALID) {
3533				ret = -EUCLEAN;
3534				goto out;
3535			}
3536			key.type = type;
3537			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3538		} else {
3539			key.type = iter->cur_key.type;
3540			key.offset = iter->cur_key.offset;
3541		}
3542
3543		/*
3544		 * Parent node found and matches current inline ref, no need to
3545		 * rebuild this node for this inline ref
3546		 */
3547		if (exist &&
3548		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3549		      exist->owner == key.offset) ||
3550		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3551		      exist->bytenr == key.offset))) {
3552			exist = NULL;
3553			continue;
3554		}
3555
3556		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3557		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3558			ret = handle_direct_tree_backref(cache, &key, cur);
3559			if (ret < 0)
3560				goto out;
3561		} else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
			/*
			 * For BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset
			 * is the root objectid. We need to search the tree to
			 * get its parent bytenr.
			 */
			ret = handle_indirect_tree_backref(trans, cache, path,
							   &key, node_key, cur);
			if (ret < 0)
				goto out;
		}
		/*
		 * Unrecognized tree backref items (as long as they pass the
		 * tree-checker) are ignored.
		 */
	}
	ret = 0;
	cur->checked = 1;
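	/* A pre-existing upper edge, if any, must have matched a backref item. */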
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}
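
/*
 * A rough usage sketch, not verbatim code from the kernel: this is how the
 * relocation code (build_backref_tree() in fs/btrfs/relocation.c) chains
 * these helpers, with allocation failure handling, locking and the walk of
 * cache->pending_edge omitted:
 *
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
 *					  &node_key, node);
 *	if (ret == 0)
 *		ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		btrfs_backref_error_cleanup(cache, node);
 */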

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node().
 *
 * Returns 0 on success, or -EUCLEAN if the backref cache turns out to be
 * inconsistent.
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node into the cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

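/*
 * Tear down a partially built backref tree after an error, releasing every
 * node and edge still queued on the cache's useless_node and pending_edge
 * lists, as well as @node itself.
 */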
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

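	/* Detach everything currently queued on the useless list. */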
	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
				   struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
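	/*
	 * Tear down all pending edges, queueing any node that becomes
	 * orphaned and uncached as useless.
	 */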
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, so we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

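	/*
	 * Free every node queued as useless. If @node itself is among them,
	 * forget it so it isn't cleaned up twice below.
	 */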
	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
				   struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}